diff --git a/.cirrus.yml b/.cirrus.yml index 6d6c979f9c..94d3e8b7bc 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -26,15 +26,14 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) #### - FEDORA_NAME: "fedora-39" + FEDORA_NAME: "fedora-40" FEDORA_AARCH64_NAME: "${FEDORA_NAME}-aarch64" - PRIOR_FEDORA_NAME: "fedora-38" + PRIOR_FEDORA_NAME: "fedora-39" RAWHIDE_NAME: "rawhide" DEBIAN_NAME: "debian-13" # Image identifiers - IMAGE_SUFFIX: "c20240212t122113z-f39f38d13" - + IMAGE_SUFFIX: "c20240701t155130z-f40f39d13" # EC2 images FEDORA_AMI: "fedora-aws-${IMAGE_SUFFIX}" @@ -59,6 +58,7 @@ env: DISTRO_NV: # any {PRIOR_,}{FEDORA,DEBIAN}_NAME value VM_IMAGE_NAME: # One of the "Google-cloud VM Images" (above) CTR_FQIN: # One of the "Container FQIN's" (above) + CI_DESIRED_RUNTIME: crun # As of 2024-05-28 there are no other supported runtimes CI_DESIRED_DATABASE: sqlite # 'sqlite' or 'boltdb' CI_DESIRED_STORAGE: overlay # overlay or vfs @@ -79,6 +79,48 @@ gcp_credentials: ENCRYPTED[a28959877b2c9c36f151781b0a05407218cda646c7d047fc556e4 aws_credentials: ENCRYPTED[4ca070bffe28eb9b27d63c568b52970dd46f119c3a83b8e443241e895dbf1737580b4d84eed27a311a2b74287ef9f79f] +validate-source_task: + name: "Validate source code changes" + alias: validate-source + # This task is primarily intended to catch human-errors early on, in a + # PR context. Skip running it everywhere else. + only_if: &is_pr "$CIRRUS_PR != ''" + gce_instance: + image_project: libpod-218412 + zone: "us-central1-a" + # golangci-lint is a very, very hungry beast. + cpu: 8 + memory: "16Gb" + # Required to be 200gig, do not modify - has i/o performance impact + # according to gcloud CLI tool warning messages. + disk: 200 + image_name: "${FEDORA_CACHE_IMAGE_NAME}" # from stdenvars + env: + TEST_FLAVOR: validate-source + # NOTE: The default way Cirrus-CI clones is *NOT* compatible with + # environment expectations in contrib/cirrus/lib.sh. Specifically + # the 'origin' remote must be defined, and all remote branches/tags + # must be available for reference from CI scripts. + clone_script: &full_clone | + set -exo pipefail + cd / + rm -rf $CIRRUS_WORKING_DIR + mkdir -p $CIRRUS_WORKING_DIR + git clone --recursive --branch=$DEST_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR + cd $CIRRUS_WORKING_DIR + git remote update origin + if [[ -n "$CIRRUS_PR" ]]; then # running for a PR + git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR + git checkout pull/$CIRRUS_PR + else + git reset --hard $CIRRUS_CHANGE_IN_REPO + fi + # Standard setup stage call, used by nearly every task in CI. + setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh' + # Standard main execution stage call, used by nearly every task in CI. + main_script: &main '/usr/bin/time --verbose --output="$STATS_LOGFILE" $GOSRC/$SCRIPT_BASE/runner.sh' + + # N/B: This matrix of build tasks are critical to CI, along with the following # aarch64 task. They build binaries for all CI platforms, and versions. On # success, the contents of the repository are preserved as an artifact for @@ -86,7 +128,7 @@ aws_credentials: ENCRYPTED[4ca070bffe28eb9b27d63c568b52970dd46f119c3a83b8e443241 # otherwise duplicative effort in most tasks. 
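The new validate-source task above is also where the shared YAML anchors (&is_pr, &full_clone, &setup, &main) now live; the rest of the file consumes them as *is_pr, *full_clone, *setup and *main. A YAML alias can only refer to an anchor defined earlier in the document, which is presumably why these definitions moved here out of build_task, which previously carried them. A minimal, generic sketch of the mechanism (task and script names below are placeholders, not taken from this file):

    first_task:
      # '&shared_script' anchors this value so later tasks can reuse it verbatim.
      some_script: &shared_script 'echo shared step'

    later_task:
      # '*shared_script' expands to exactly the same value anchored above.
      some_script: *shared_script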
build_task: alias: 'build' - name: 'Build for $DISTRO_NV' + name: 'Build for $DISTRO_NV' # N/B: Referenced by URLencoded strings elsewhere gce_instance: &standardvm image_project: libpod-218412 zone: "us-central1-a" @@ -103,17 +145,12 @@ build_task: # Not used here, is used in other tasks VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - # ID for re-use of build output - CI_DESIRED_RUNTIME: crun - env: DISTRO_NV: ${PRIOR_FEDORA_NAME} VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN} - CI_DESIRED_RUNTIME: crun CI_DESIRED_DATABASE: boltdb CI_DESIRED_STORAGE: vfs - # Catch invalid "TMPDIR == /tmp" assumptions; PR #19281 - TMPDIR: /var/tmp - env: <<: *stdenvars DISTRO_NV: ${RAWHIDE_NAME} @@ -122,42 +159,18 @@ build_task: - env: DISTRO_NV: ${DEBIAN_NAME} VM_IMAGE_NAME: ${DEBIAN_CACHE_IMAGE_NAME} - CI_DESIRED_RUNTIME: runc CI_DESIRED_NETWORK: netavark - # Ignore cgroups-v1 warnings on debian - PODMAN_IGNORE_CGROUPSV1_WARNING: true env: TEST_FLAVOR: build - # NOTE: The default way Cirrus-CI clones is *NOT* compatible with - # environment expectations in contrib/cirrus/lib.sh. Specifically - # the 'origin' remote must be defined, and all remote branches/tags - # must be available for reference from CI scripts. - clone_script: &full_clone | - cd / - rm -rf $CIRRUS_WORKING_DIR - mkdir -p $CIRRUS_WORKING_DIR - git clone --recursive --branch=$DEST_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR - cd $CIRRUS_WORKING_DIR - git remote update origin - if [[ -n "$CIRRUS_PR" ]]; then # running for a PR - git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - git checkout pull/$CIRRUS_PR - else - git reset --hard $CIRRUS_CHANGE_IN_REPO - fi - # Some test operations & checks require a git "identity" - _gc='git config --file /root/.gitconfig' - $_gc user.email "TMcTestFace@example.com" - $_gc user.name "Testy McTestface" + clone_script: *full_clone # Attempt to prevent flakes by confirming basic environment expectations, # network service connectivity and essential container image availability. + # TODO: Rename to "ci-sanity" and move into task that runs in parallel to build prebuild_script: &prebuild $SCRIPT_BASE/prebuild.sh - # Standard setup stage call, used by nearly every task in CI. - setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh' + setup_script: *setup # Attempt to prevent flakes by confirming automation environment and # all required external/3rd-party services are available and functional. - # Standard main execution stage call, used by nearly every task in CI. - main_script: &main '/usr/bin/time --verbose --output="$STATS_LOGFILE" $GOSRC/$SCRIPT_BASE/runner.sh' + main_script: *main # Attempt to catch code-quality and vendoring problems early. 
postbuild_script: &postbuild $SCRIPT_BASE/postbuild.sh # Cirrus-CI is very slow uploading one file at time, and the repo contains @@ -172,6 +185,7 @@ build_task: path: ./*-${STATS_LOGFILE_SFX} type: text/plain + build_aarch64_task: alias: 'build_aarch64' name: 'Build for $DISTRO_NV' @@ -185,9 +199,9 @@ build_aarch64_task: DISTRO_NV: ${FEDORA_AARCH64_NAME} VM_IMAGE_NAME: ${FEDORA_AARCH64_AMI} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - CI_DESIRED_RUNTIME: crun TEST_FLAVOR: build clone_script: *full_clone + # TODO: Rename to "ci-sanity" and move into task that runs in parallel to build prebuild_script: *prebuild setup_script: *setup postbuild_script: *postbuild @@ -202,77 +216,151 @@ build_aarch64_task: always: *runner_stats -# Confirm the result of building on at least one platform appears sane. -# This confirms the binaries can be executed, checks --help vs docs, and -# other essential post-build validation checks. -validate_task: - name: "Validate ${DISTRO_NV} Build" - alias: validate - # This task is primarily intended to catch human-errors early on, in a - # PR. Skip it for branch-push, branch-create, and tag-push to improve - # automation reliability/speed in those contexts. Any missed errors due - # to nonsequential PR merging practices, will be caught on a future PR, - # build or test task failures. +# There are several other important variations of podman which +# must always build successfully. Most of them are handled in +# this task, though a few need dedicated tasks which follow. +alt_build_task: + name: "$ALT_NAME" + alias: alt_build + # Don't create task for [CI:DOCS], or rhel-release builds # Docs: ./contrib/cirrus/CIModes.md - only_if: &is_pr "$CIRRUS_PR != ''" - depends_on: - - build - # golangci-lint is a very, very hungry beast. - gce_instance: &bigvm - <<: *standardvm - cpu: 8 - memory: "16Gb" - matrix: - - env: - <<: *stdenvars - VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} - DISTRO_NV: ${FEDORA_NAME} - - env: - <<: *stdenvars - VM_IMAGE_NAME: ${RAWHIDE_CACHE_IMAGE_NAME} - DISTRO_NV: ${RAWHIDE_NAME} + only_if: &no_rhel_release | + $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' && + $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel' env: - TEST_FLAVOR: validate - # N/B: This script depends on ${DISTRO_NV} being defined for the task. - clone_script: &get_gosrc | - cd /tmp - echo "$ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tbz" - time $ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tbz - time tar xjf /tmp/repo.tbz -C $GOSRC + <<: *stdenvars + TEST_FLAVOR: "altbuild" + gce_instance: *standardvm + matrix: + - env: + ALT_NAME: 'Build Each Commit' + - env: + # TODO: Replace with task using `winmake` to build + # binary and archive installation zip file. + ALT_NAME: 'Windows Cross' # N/B: Referenced by URLencoded strings elsewhere + - env: + ALT_NAME: 'Alt Arch. x86 Cross' # N/B: Referenced by URLencoded strings elsewhere + - env: + ALT_NAME: 'Alt Arch. ARM Cross' # N/B: Referenced by URLencoded strings elsewhere + - env: + ALT_NAME: 'Alt Arch. MIPS Cross' # N/B: Referenced by URLencoded strings elsewhere + - env: + ALT_NAME: 'Alt Arch. MIPS64 Cross' # N/B: Referenced by URLencoded strings elsewhere + - env: + ALT_NAME: 'Alt Arch. Other Cross' # N/B: Referenced by URLencoded strings elsewhere + # This task cannot make use of the shared repo.tbz artifact. + clone_script: *full_clone setup_script: *setup main_script: *main + # Produce a new repo.tbz artifact for consumption by 'artifacts' task. 
+ repo_prep_script: *repo_prep + repo_artifacts: *repo_artifacts always: *runner_stats -# Confirm the result of building on at least one platform appears sane. -# This confirms the binaries can be executed, checks --help vs docs, and -# other essential post-build validation checks. -validate_aarch64_task: - name: "Validate $DISTRO_NV Build" - alias: validate_aarch64 - # This task is primarily intended to catch human-errors early on, in a - # PR. Skip it for branch-push, branch-create, and tag-push to improve - # automation reliability/speed in those contexts. Any missed errors due - # to nonsequential PR merging practices, will be caught on a future PR, - # build or test task failures. +# Confirm building the remote client, natively on a Mac OS-X VM. +osx_alt_build_task: + name: "Build for MacOS amd64+arm64" # N/B: Referenced by URLencoded strings elsewhere + alias: osx_alt_build + # Docs: ./contrib/cirrus/CIModes.md + only_if: *no_rhel_release # RHEL never releases podman mac installer binary + persistent_worker: &mac_pw + labels: + os: darwin + arch: arm64 + purpose: prod + env: &mac_env + CIRRUS_SHELL: "/bin/bash" # sh is the default + CIRRUS_WORKING_DIR: "$HOME/ci/task-${CIRRUS_TASK_ID}" # Isolation: $HOME will be set to "ci" dir. + # Prevent cache-pollution from one task to the next. + GOPATH: "$CIRRUS_WORKING_DIR/.go" + GOCACHE: "$CIRRUS_WORKING_DIR/.go/cache" + GOENV: "$CIRRUS_WORKING_DIR/.go/support" + GOSRC: "$HOME/ci/task-${CIRRUS_TASK_ID}" + clone_script: *full_clone + # This host is/was shared with potentially many other CI tasks. + # The previous task may have been canceled or aborted. + prep_script: &mac_cleanup "contrib/cirrus/mac_cleanup.sh" + lint_script: + - make golangci-lint + basic_build_script: + - make .install.ginkgo + - make podman-remote + - make podman-mac-helper + build_pkginstaller_script: + - pushd contrib/pkginstaller + - make ARCH=amd64 NO_CODESIGN=1 pkginstaller + - make ARCH=aarch64 NO_CODESIGN=1 pkginstaller + - make ARCH=universal NO_CODESIGN=1 pkginstaller + - popd + build_amd64_script: + - make podman-remote-release-darwin_amd64.zip + # Building arm podman needs to be the last thing built in this task + # The Mac tests rely on this Podman binary to run, and the CI Mac is ARM-based + build_arm64_script: + - make podman-remote-release-darwin_arm64.zip + # Produce a new repo.tbz artifact for consumption by dependent tasks. + repo_prep_script: *repo_prep + repo_artifacts: *repo_artifacts + # This host is/was shared with potentially many other CI tasks. + # Ensure nothing is left running while waiting for the next task. + always: + task_cleanup_script: *mac_cleanup + + +# Build freebsd release natively on a FreeBSD VM.
+freebsd_alt_build_task: + name: "FreeBSD Cross" + alias: freebsd_alt_build + # Only run on 'main' and PRs against 'main' # Docs: ./contrib/cirrus/CIModes.md - only_if: *is_pr + only_if: | + $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' && + ( $CIRRUS_BRANCH == 'main' || $CIRRUS_BASE_BRANCH == 'main' ) + env: + <<: *stdenvars + # Functional FreeBSD builds must be built natively since they depend on CGO + DISTRO_NV: freebsd-13 + VM_IMAGE_NAME: notyet + CTR_FQIN: notyet + CIRRUS_SHELL: "/bin/sh" + TEST_FLAVOR: "altbuild" + ALT_NAME: 'FreeBSD Cross' + freebsd_instance: + image_family: freebsd-13-3 + setup_script: + - pkg install -y gpgme bash go-md2man gmake gsed gnugrep go pkgconf + build_amd64_script: + - gmake podman-release + # This task cannot make use of the shared repo.tbz artifact and must + # produce a new repo.tbz artifact for consumption by 'artifacts' task. + repo_prep_script: *repo_prep + repo_artifacts: *repo_artifacts + + +# Status aggregator for all builds. This task simply makes dependency +# management easier, and results in a simpler graph than using YAML +# anchors/aliases. +build_success_task: + name: "Total Build Success" + alias: build_success depends_on: + - validate-source + - build - build_aarch64 - ec2_instance: *standard_build_ec2_aarch64 + - alt_build + - osx_alt_build + - freebsd_alt_build env: - <<: *stdenvars_aarch64 - TEST_FLAVOR: validate - DISTRO_NV: ${FEDORA_AARCH64_NAME} - # N/B: This script depends on ${DISTRO_NV} being defined for the task. - clone_script: &get_gosrc_aarch64 | - cd /tmp - echo "$ARTCURL/build_aarch64/repo/repo.tbz" - time $ARTCURL/build_aarch64/repo/repo.tbz - time tar xjf /tmp/repo.tbz -C $GOSRC - setup_script: *setup - main_script: *main - always: *runner_stats + CTR_FQIN: ${FEDORA_CONTAINER_FQIN} + container: &smallcontainer + image: ${CTR_FQIN} + # Resources are limited across ALL currently executing tasks + # ref: https://cirrus-ci.org/guide/linux/#linux-containers + cpu: 1 + memory: 1 + clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR" + script: *noop # Exercise the "libpod" API with a small set of common @@ -287,13 +375,18 @@ bindings_task: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' - depends_on: - - build + depends_on: &build + - build_success gce_instance: *standardvm env: <<: *stdenvars TEST_FLAVOR: bindings - clone_script: *get_gosrc + # N/B: This script depends on ${DISTRO_NV} being defined for the task. + clone_script: &get_gosrc | + cd /tmp + echo "$ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tbz" + time $ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tbz + time tar xjf /tmp/repo.tbz -C $GOSRC setup_script: *setup main_script: *main always: &logs_artifacts @@ -322,14 +415,12 @@ swagger_task: only_if: | $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' - depends_on: - - build + depends_on: *build gce_instance: *standardvm env: <<: *stdenvars TEST_FLAVOR: swagger CTR_FQIN: 'quay.io/libpod/gcsupld:${IMAGE_SUFFIX}' - # N/B: Do not modify below items w/o update to references in .gitleaks/config.toml GCPJSON: ENCRYPTED[927dc01e755eaddb4242b0845cf86c9098d1e3dffac38c70aefb1487fd8b4fe6dd6ae627b3bffafaba70e2c63172664e] GCPNAME: ENCRYPTED[c145e9c16b6fb88d476944a454bf4c1ccc84bb4ecaca73bdd28bdacef0dfa7959ebc8171a27b2e4064d66093b2cdba49] GCPPROJECT: 'libpod-218412' @@ -343,59 +434,19 @@ swagger_task: type: text/plain -# There are several other important variations of podman which -# must always build successfully.
Most of them are handled in -# this task, though a few need dedicated tasks which follow. -alt_build_task: - name: "$ALT_NAME" - alias: alt_build - # Don't create task for [CI:DOCS], or rhel-release builds - # Docs: ./contrib/cirrus/CIModes.md - only_if: &no_rhel_release | - $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' && - $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel' - depends_on: - - build - env: - <<: *stdenvars - TEST_FLAVOR: "altbuild" - gce_instance: *standardvm +win_installer_task: + name: "Verify Win Installer Build" matrix: - env: - ALT_NAME: 'Build Each Commit' - - env: - # TODO: Replace with task using `winmake` to build - # binary and archive installation zip file. - ALT_NAME: 'Windows Cross' - - env: - ALT_NAME: 'Alt Arch. x86 Cross' - - env: - ALT_NAME: 'Alt Arch. ARM Cross' + CONTAINERS_MACHINE_PROVIDER: 'wsl' - env: - ALT_NAME: 'Alt Arch. MIPS Cross' - - env: - ALT_NAME: 'Alt Arch. MIPS64 Cross' - - env: - ALT_NAME: 'Alt Arch. Other Cross' - # This task cannot make use of the shared repo.tbz artifact. - clone_script: *full_clone - setup_script: *setup - main_script: *main - # Produce a new repo.tbz artifact for consumption by 'artifacts' task. - repo_prep_script: *repo_prep - repo_artifacts: *repo_artifacts - always: *runner_stats - - -win_installer_task: - name: "Verify Win Installer Build" + CONTAINERS_MACHINE_PROVIDER: 'hyperv' alias: win_installer only_if: # RHEL never releases podman windows installer binary $CIRRUS_TAG == '' && $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' && $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel' - depends_on: - - alt_build + depends_on: *build ec2_instance: &windows image: "${WINDOWS_AMI}" type: m5.large @@ -441,85 +492,6 @@ win_installer_task: main_script: ".\\repo\\contrib\\cirrus\\win-installer-main.ps1" -# Confirm building the remote client, natively on a Mac OS-X VM. -osx_alt_build_task: - name: "OSX Cross" - alias: osx_alt_build - # Docs: ./contrib/cirrus/CIModes.md - only_if: *no_rhel_release # RHEL never releases podman mac installer binary - depends_on: - - build - persistent_worker: &mac_pw - labels: - os: darwin - arch: arm64 - purpose: prod - env: &mac_env - CIRRUS_SHELL: "/bin/bash" # sh is the default - CIRRUS_WORKING_DIR: "$HOME/ci/task-${CIRRUS_TASK_ID}" # Isolation: $HOME will be set to "ci" dir. - # Prevent cache-pollution fron one task to the next. - GOPATH: "$CIRRUS_WORKING_DIR/.go" - GOCACHE: "$CIRRUS_WORKING_DIR/.go/cache" - GOENV: "$CIRRUS_WORKING_DIR/.go/support" - GOSRC: "$HOME/ci/task-${CIRRUS_TASK_ID}" - # This host is/was shared with potentially many other CI tasks. - # The previous task may have been canceled or aborted. - prep_script: &mac_cleanup "contrib/cirrus/mac_cleanup.sh" - lint_script: - - make lint || true # TODO: Enable when code passes check - basic_build_script: - - make .install.ginkgo - - make podman-remote - - make podman-mac-helper - build_amd64_script: - - make podman-remote-release-darwin_amd64.zip - build_arm64_script: - - make podman-remote-release-darwin_arm64.zip - build_pkginstaller_script: - - cd contrib/pkginstaller - - make ARCH=amd64 NO_CODESIGN=1 pkginstaller - - make ARCH=aarch64 NO_CODESIGN=1 pkginstaller - # Produce a new repo.tbz artifact for consumption by dependent tasks. - repo_prep_script: *repo_prep - repo_artifacts: *repo_artifacts - # This host is/was shared with potentially many other CI tasks. - # Ensure nothing is left running while waiting for the next task. - always: - task_cleanup_script: *mac_cleanup - - -# Build freebsd release natively on a FreeBSD VM. 
-freebsd_alt_build_task: - name: "FreeBSD Cross" - alias: freebsd_alt_build - # Only run on 'main' and PRs against 'main' - # Docs: ./contrib/cirrus/CIModes.md - only_if: | - $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' && - ( $CIRRUS_BRANCH == 'main' || $CIRRUS_BASE_BRANCH == 'main' ) - depends_on: - - build - env: - <<: *stdenvars - # Functional FreeBSD builds must be built natively since they depend on CGO - DISTRO_NV: freebsd-13 - VM_IMAGE_NAME: notyet - CTR_FQIN: notyet - CIRRUS_SHELL: "/bin/sh" - TEST_FLAVOR: "altbuild" - ALT_NAME: 'FreeBSD Cross' - freebsd_instance: - image_family: freebsd-13-2 - setup_script: - - pkg install -y gpgme bash go-md2man gmake gsed gnugrep go pkgconf - build_amd64_script: - - gmake podman-release - # This task cannot make use of the shared repo.tbz artifact and must - # produce a new repo.tbz artifact for consumption by 'artifacts' task. - repo_prep_script: *repo_prep - repo_artifacts: *repo_artifacts - - # Verify podman is compatible with the docker python-module. docker-py_test_task: name: Docker-py Compat. @@ -532,9 +504,7 @@ docker-py_test_task: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' - - depends_on: - - build + depends_on: *build gce_instance: *standardvm env: <<: *stdenvars @@ -552,9 +522,7 @@ unit_test_task: alias: unit_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: - - build - - validate + depends_on: *build matrix: - env: *stdenvars # Special-case: Rootless on latest Fedora (standard) VM @@ -576,9 +544,7 @@ apiv2_test_task: alias: apiv2_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: - - build - - validate + depends_on: *build gce_instance: *standardvm # Test is normally pretty quick, about 10-minutes. If it hangs, # don't make developers wait the full 1-hour timeout. 
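A recurring pattern in the hunks that follow: test tasks no longer clone git at all. Their clone_script (the *get_gosrc and *get_gosrc_aarch64 aliases) downloads the repo.tbz artifact published by the corresponding build task, and the build task's name is embedded, percent-encoded, in that artifact URL; that is what the "N/B: Referenced by URLencoded strings elsewhere" notes next to task names warn about. Roughly what such a fetch expands to for DISTRO_NV=fedora-40, assuming ARTCURL is a curl wrapper defined in the CI library scripts (it is not shown in this diff, so the exact options and URL below are illustrative only):

    # Illustrative expansion of "clone_script: *get_gosrc"; <BUILD_ID> is a placeholder.
    cd /tmp
    curl --fail --location --remote-name \
      "https://api.cirrus-ci.com/v1/artifact/build/<BUILD_ID>/Build%20for%20fedora-40/repo/repo.tbz"
    tar xjf /tmp/repo.tbz -C "$GOSRC"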
@@ -602,25 +568,16 @@ compose_test_task: alias: compose_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: - - build - - validate + depends_on: *build gce_instance: *standardvm matrix: - env: - TEST_FLAVOR: compose - PRIV_NAME: root - - env: - TEST_FLAVOR: compose - PRIV_NAME: rootless - - env: - TEST_FLAVOR: compose_v2 PRIV_NAME: root - env: - TEST_FLAVOR: compose_v2 PRIV_NAME: rootless env: <<: *stdenvars + TEST_FLAVOR: compose_v2 clone_script: *get_gosrc setup_script: *setup main_script: *main @@ -635,12 +592,25 @@ local_integration_test_task: &local_integration_test_task alias: local_integration_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: &build_unit - - build - - unit_test + # skip when: - it is a PR (we never want to skip on nightly tests); and + # - CI:ALL title is not set; and + # - no danger files are changed; and + # - when no int test code is changed; and + # - NOT (source code is changed AND NOT only test files) + skip: &skip_int_test >- + $CIRRUS_PR != '' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:ALL.*' && + !changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'hack/**', 'version/rawversion/*') && + !changesInclude('test/e2e/**', 'test/utils/**') && + !(changesInclude('**/*.go', '**/*.c') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**')) + depends_on: *build matrix: *platform_axis - gce_instance: *standardvm - timeout_in: 50m + # integration tests scale well with cpu as they are parallelized + # so we give these tests 4 cores to make them faster + gce_instance: &fastvm + <<: *standardvm + cpu: 4 + timeout_in: 30m env: TEST_FLAVOR: int clone_script: *get_gosrc @@ -674,21 +644,20 @@ container_integration_test_task: alias: container_integration_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: *build_unit + skip: *skip_int_test + depends_on: *build matrix: &fedora_vm_axis - env: DISTRO_NV: ${FEDORA_NAME} VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - CI_DESIRED_RUNTIME: crun - env: DISTRO_NV: ${PRIOR_FEDORA_NAME} VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN} - CI_DESIRED_RUNTIME: crun CI_DESIRED_DATABASE: boltdb - gce_instance: *standardvm - timeout_in: 50m + gce_instance: *fastvm + timeout_in: 30m env: TEST_FLAVOR: int TEST_ENVIRON: container @@ -704,10 +673,11 @@ rootless_integration_test_task: alias: rootless_integration_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_branch_build_docs_machine - depends_on: *build_unit + skip: *skip_int_test + depends_on: *build matrix: *platform_axis - gce_instance: *standardvm - timeout_in: 50m + gce_instance: *fastvm + timeout_in: 30m env: TEST_FLAVOR: int PRIV_NAME: rootless @@ -727,13 +697,16 @@ podman_machine_task: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' ) || $CIRRUS_CRON == "main" - depends_on: - - build - - validate - - local_integration_test - - remote_integration_test - - container_integration_test - - rootless_integration_test + # skip when: - it is a PR (we never want to skip on nightly tests); and + # - CI:ALL title is not set; and + # - no danger files are changed; and + # - no machine code files are changed + skip: &skip_machine_test >- + $CIRRUS_PR != '' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:ALL.*' && + !changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'hack/**', 'version/rawversion/*') && + 
!changesInclude('cmd/podman/machine/**', 'pkg/machine/**', '**/*machine*.go') + depends_on: *build ec2_instance: image: "${VM_IMAGE_NAME}" type: "${EC2_INST_TYPE}" @@ -754,13 +727,8 @@ podman_machine_aarch64_task: name: *std_name_fmt alias: podman_machine_aarch64 only_if: *machine_cron_not_tag_build_docs - depends_on: - - build_aarch64 - - validate_aarch64 - - local_integration_test - - remote_integration_test - - container_integration_test - - rootless_integration_test + skip: *skip_machine_test + depends_on: *build ec2_instance: <<: *standard_build_ec2_aarch64 env: @@ -769,7 +737,11 @@ podman_machine_aarch64_task: PRIV_NAME: "rootless" # intended use-case DISTRO_NV: "${FEDORA_AARCH64_NAME}" VM_IMAGE_NAME: "${FEDORA_AARCH64_AMI}" - clone_script: *get_gosrc_aarch64 + clone_script: &get_gosrc_aarch64 | + cd /tmp + echo "$ARTCURL/build_aarch64/repo/repo.tbz" + time $ARTCURL/build_aarch64/repo/repo.tbz + time tar xjf /tmp/repo.tbz -C $GOSRC setup_script: *setup main_script: *main always: *int_logs_artifacts @@ -781,14 +753,8 @@ podman_machine_windows_task: # Only run for non-docs/copr PRs and non-release branch builds # and never for tags. Docs: ./contrib/cirrus/CIModes.md only_if: *machine_cron_not_tag_build_docs - depends_on: - - alt_build - - build - - win_installer - - local_integration_test - - remote_integration_test - - container_integration_test - - rootless_integration_test + skip: *skip_machine_test + depends_on: *build ec2_instance: <<: *windows type: m5zn.metal @@ -801,18 +767,19 @@ podman_machine_windows_task: TEST_FLAVOR: "machine-hyperv" clone_script: *winclone main_script: ".\\repo\\contrib\\cirrus\\win-podman-machine-main.ps1" + always: + # Required for `contrib/cirrus/logformatter` to work properly + html_artifacts: + path: ./*.html + type: text/html podman_machine_mac_task: name: *std_name_fmt alias: podman_machine_mac only_if: *machine_cron_not_tag_build_docs - depends_on: - - osx_alt_build - - local_integration_test - - remote_integration_test - - container_integration_test - - rootless_integration_test + skip: *skip_machine_test + depends_on: *build persistent_worker: *mac_pw env: <<: *mac_env @@ -828,7 +795,7 @@ podman_machine_mac_task: clone_script: # artifacts from osx_alt_build_task - mkdir -p $CIRRUS_WORKING_DIR - cd $CIRRUS_WORKING_DIR - - $ARTCURL/OSX%20Cross/repo/repo.tbz + - $ARTCURL/Build%20for%20MacOS%20amd64%2Barm64/repo/repo.tbz - tar xjf repo.tbz # This host is/was shared with potentially many other CI tasks. # The previous task may have been canceled or aborted. @@ -838,15 +805,18 @@ podman_machine_mac_task: # TODO: Timeout bumped b/c initial image download (~5min) and VM # resize (~2min) causes test-timeout (90s default). Should # tests deal with this internally? - smoke_test_script: - - MACHINE_TEST_TIMEOUT=500 make localmachine FOCUS_FILE="basic_test.go" test_script: - - make localmachine + - "contrib/cirrus/mac_runner.sh" # This host is/was shared with potentially many other CI tasks. # Ensure nothing is left running while waiting for the next task. always: + # Required for `contrib/cirrus/logformatter` to work properly + html_artifacts: + path: ./*.html + type: text/html task_cleanup_script: *mac_cleanup + # Always run subsequent to integration tests. While parallelism is lost # with runtime, debugging system-test failures can be more challenging # for some golang developers. 
Otherwise the following tasks run across @@ -861,7 +831,18 @@ local_system_test_task: &local_system_test_task $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' - depends_on: *build_unit + # skip when: - it is a PR (we never want to skip on nightly tests); and + # - CI:ALL title is not set; and + # - no danger files are changed; and + # - no system test code is changed; and + # - NOT (source code is changed AND not only test files) + skip: &skip_system_test >- + $CIRRUS_PR != '' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:ALL.*' && + !changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'hack/**', 'version/rawversion/*') && + !changesInclude('test/system/**') && + !(changesInclude('**/*.go', '**/*.c') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**')) + depends_on: *build matrix: *platform_axis gce_instance: *standardvm env: @@ -878,10 +859,9 @@ local_system_test_aarch64_task: &local_system_test_task_aarch64 # Don't create task for tags, or if using [CI:DOCS], [CI:BUILD] # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: - - build_aarch64 - - validate_aarch64 - - unit_test + skip: *skip_system_test + depends_on: *build + persistent_worker: *mac_pw ec2_instance: *standard_build_ec2_aarch64 env: <<: *stdenvars_aarch64 @@ -917,7 +897,6 @@ rootless_remote_system_test_task: # Not used here, is used in other tasks VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - CI_DESIRED_RUNTIME: crun <<: *local_system_test_task alias: rootless_remote_system_test gce_instance: *standardvm @@ -932,7 +911,8 @@ rootless_system_test_task: alias: rootless_system_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: *build_unit + skip: *skip_system_test + depends_on: *build matrix: *platform_axis gce_instance: *standardvm env: @@ -943,14 +923,15 @@ rootless_system_test_task: main_script: *main always: *logs_artifacts + minikube_test_task: name: *std_name_fmt alias: minikube_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: - - build - - rootless_system_test + # 2024-05-21: flaking almost constantly since March. 
+ skip: $CI == $CI + depends_on: *build gce_instance: *standardvm env: <<: *stdenvars @@ -966,9 +947,7 @@ farm_test_task: alias: farm_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: - - build - - rootless_system_test + depends_on: *build gce_instance: *standardvm env: <<: *stdenvars @@ -984,9 +963,16 @@ buildah_bud_test_task: alias: buildah_bud_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: - - build - - local_integration_test + # skip when: - it is a PR (we never want to skip on nightly tests); and + # - CI:ALL title is not set; and + # - no danger files are changed; and + # - no build source files are changed and no bud tests + skip: >- + $CIRRUS_PR != '' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:ALL.*' && + !changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'hack/**', 'version/rawversion/*') && + !changesInclude('**/*build*.go', 'test/buildah-bud/**') + depends_on: *build env: <<: *stdenvars TEST_FLAVOR: bud @@ -1007,12 +993,14 @@ upgrade_test_task: alias: upgrade_test # Docs: ./contrib/cirrus/CIModes.md only_if: *not_tag_magic - depends_on: - - build - - local_system_test + depends_on: *build matrix: -# - env: -# PODMAN_UPGRADE_FROM: v4.1.0 + - env: + # 2024-02: as long as possible/reasonable, try to keep + # one version < 4.8 so we can test boltdb. v4.3.1 is + # the lowest we can go right now, builds before that + # have netavark <1.4 which hangs on f39 kernel (#21863). + PODMAN_UPGRADE_FROM: v4.3.1 - env: PODMAN_UPGRADE_FROM: v4.8.0 gce_instance: *standardvm @@ -1045,39 +1033,32 @@ meta_task: ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${RAWHIDE_CACHE_IMAGE_NAME} ${DEBIAN_CACHE_IMAGE_NAME} - build-push-${IMAGE_SUFFIX} EC2IMGNAMES: >- ${FEDORA_AARCH64_AMI} ${FEDORA_AMI} ${WINDOWS_AMI} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_REPO_NAME}" - # N/B: Do not modify below items w/o update to references in .gitleaks/config.toml AWSINI: ENCRYPTED[21b2db557171b11eb5abdbccae593f48c9caeba86dfcc4d4ff109edee9b4656ab6720a110dadfcd51e88cc59a71cc7af] GCPJSON: ENCRYPTED[3a198350077849c8df14b723c0f4c9fece9ebe6408d35982e7adf2105a33f8e0e166ed3ed614875a0887e1af2b8775f4] GCPNAME: ENCRYPTED[2f9738ef295a706f66a13891b40e8eaa92a89e0e87faf8bed66c41eca72bf76cfd190a6f2d0e8444c631fdf15ed32ef6] GCPPROJECT: libpod-218412 - clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR" + clone_script: *noop script: /usr/local/bin/entrypoint.sh -# Status aggregator for all tests. This task simply ensures a defined -# set of tasks all passed, and allows confirming that based on the status -# of this task. +# Status aggregator for all tests. This task ensures a defined set of tasks +# all passed, and allows confirming that based on the status of this task. success_task: + # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT. + # Ref: https://github.com/openshift/release/pull/48855 name: "Total Success" alias: success # N/B: ALL tasks must be listed here, minus their '_task' suffix. 
depends_on: - - build - - build_aarch64 - - validate - - validate_aarch64 + - build_success - bindings - swagger - - alt_build - - osx_alt_build - - freebsd_alt_build - win_installer - docker-py_test - unit_test @@ -1095,22 +1076,21 @@ success_task: - local_system_test_aarch64 - remote_system_test - remote_system_test_aarch64 + - rootless_remote_system_test - rootless_system_test + - local_system_test + - local_system_test_aarch64 + - remote_system_test - rootless_remote_system_test + - rootless_system_test - minikube_test - farm_test - buildah_bud_test - upgrade_test - meta - container: &smallcontainer - image: ${CTR_FQIN} - # Resources are limited across ALL currently executing tasks - # ref: https://cirrus-ci.org/guide/linux/#linux-containers - cpu: 2 - memory: 2 env: CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - TEST_ENVIRON: container + container: *smallcontainer clone_script: *noop script: | if [[ "$CIRRUS_CHANGE_TITLE" =~ CI:MACHINE ]] && [[ -n "$CIRRUS_PR" ]]; then @@ -1188,7 +1168,7 @@ artifacts_task: osx_binaries_script: - mkdir -p /tmp/osx - cd /tmp/osx - - $ARTCURL/OSX%20Cross/repo/repo.tbz + - $ARTCURL/Build%20for%20MacOS%20amd64%2Barm64/repo/repo.tbz - tar xjf repo.tbz - mv ./podman-remote-release-darwin_*.zip $CIRRUS_WORKING_DIR/ - mv ./contrib/pkginstaller/out/podman-installer-macos-*.pkg $CIRRUS_WORKING_DIR/ @@ -1211,7 +1191,7 @@ release_task: # Docs: ./contrib/cirrus/CIModes.md only_if: $CIRRUS_TAG != '' depends_on: - - build + - build_success - success gce_instance: *standardvm env: @@ -1238,7 +1218,7 @@ release_test_task: # see RELEASE_PROCESS.md trigger_type: manual depends_on: - - build + - build_success - success gce_instance: *standardvm env: diff --git a/.github/workflows/check_cirrus_cron.yml b/.github/workflows/check_cirrus_cron.yml index c4ca4efaa1..8b8dfe78b3 100644 --- a/.github/workflows/check_cirrus_cron.yml +++ b/.github/workflows/check_cirrus_cron.yml @@ -44,7 +44,7 @@ jobs: runs-on: ubuntu-latest steps: # This is where the scripts live - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + - uses: actions/checkout@v4 with: repository: containers/podman ref: 'main' @@ -61,7 +61,7 @@ jobs: - if: steps.cron.outputs.failures > 0 name: Send failure notification e-mail # Ref: https://github.com/dawidd6/action-send-mail - uses: dawidd6/action-send-mail@v3.11.0 + uses: dawidd6/action-send-mail@v3.12.0 with: server_address: ${{secrets.ACTION_MAIL_SERVER}} server_port: 465 @@ -80,7 +80,7 @@ jobs: - if: failure() name: Send error notification e-mail - uses: dawidd6/action-send-mail@v3.11.0 + uses: dawidd6/action-send-mail@v3.12.0 with: server_address: ${{secrets.ACTION_MAIL_SERVER}} server_port: 465 diff --git a/.github/workflows/discussion_lock.yml b/.github/workflows/discussion_lock.yml deleted file mode 100644 index 79fbbb3ce4..0000000000 --- a/.github/workflows/discussion_lock.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- - -# Format ref: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions - -name: "Lock closed Issue/PR discussions" - -on: - schedule: - - cron: '0 0 * * *' - # Allow re-use of this workflow by other repositories - # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows - workflow_call: - secrets: - ACTION_MAIL_SERVER: - required: true - ACTION_MAIL_USERNAME: - required: true - ACTION_MAIL_PASSWORD: - required: true - ACTION_MAIL_SENDER: - required: true - # Debug: Allow triggering job manually in github-actions WebUI - workflow_dispatch: {} - -permissions: - contents: read - -concurrency: 
- group: lock - -env: - # Number of days before a closed issue/PR is be comment-locked. - # Note: dessant/lock-threads will only process a max. of - # 50 issues/PRs at a time. - CLOSED_DAYS: 90 - # Pre-created issue/PR label to add (preferably a bright color). - # This is intended to direct a would-be commenter's actions. - LOCKED_LABEL: 'locked - please file new issue/PR' - -jobs: - closed_issue_discussion_lock: - name: "Lock closed Issue/PR discussions" - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - # Ref: https://github.com/dessant/lock-threads#usage - - uses: dessant/lock-threads@v5 - with: - issue-inactive-days: '${{env.CLOSED_DAYS}}' - pr-inactive-days: '${{env.CLOSED_DAYS}}' - add-issue-labels: '${{env.LOCKED_LABEL}}' - add-pr-labels: '${{env.LOCKED_LABEL}}' - pr-lock-reason: 'resolved' - log-output: true - - if: failure() - name: Send job failure notification e-mail - uses: dawidd6/action-send-mail@v3.11.0 - with: - server_address: ${{secrets.ACTION_MAIL_SERVER}} - server_port: 465 - username: ${{secrets.ACTION_MAIL_USERNAME}} - password: ${{secrets.ACTION_MAIL_PASSWORD}} - subject: Github workflow error on ${{github.repository}} - to: podman-monitor@lists.podman.io - from: ${{secrets.ACTION_MAIL_SENDER}} - body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}" diff --git a/.github/workflows/issue_pr_lock.yml b/.github/workflows/issue_pr_lock.yml new file mode 100644 index 0000000000..fee1e7f7d6 --- /dev/null +++ b/.github/workflows/issue_pr_lock.yml @@ -0,0 +1,85 @@ +--- + +# WARNING ALERT DANGER CAUTION ATTENTION: This file is re-used from the +# `main` branch, by workflows in (at least) the Buildah and Skopeo repos. +# Please think twice before making large changes, renaming, or moving the file. + +# Format ref: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions + +name: "Lock closed issues and PRs" + +on: + schedule: + - cron: '0 0 * * *' + # Allow re-use of this workflow by other repositories + # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows + workflow_call: + secrets: + STALE_LOCKING_APP_PRIVATE_KEY: + required: true + ACTION_MAIL_SERVER: + required: true + ACTION_MAIL_USERNAME: + required: true + ACTION_MAIL_PASSWORD: + required: true + ACTION_MAIL_SENDER: + required: true + # Debug: Allow triggering job manually in github-actions WebUI + workflow_dispatch: {} + +permissions: + contents: read + +concurrency: + group: lock + +env: + # Number of days before a closed issue/PR is comment-locked. + # Note: dessant/lock-threads will only process a max. of + # 50 issues/PRs at a time. + CLOSED_DAYS: 90 + # Pre-created issue/PR label to add (preferably a bright color). + # This is intended to direct a would-be commenter's actions.
+ LOCKED_LABEL: 'locked - please file new issue/PR' + +jobs: + manage_locking: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + # Use dedicated github app to workaround API rate limiting + # Ref: https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/making-authenticated-api-requests-with-a-github-app-in-a-github-actions-workflow + - name: Obtain Stale Locking App token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + # N/B: These are both defined at the containers-org level + app-id: ${{ vars.STALE_LOCKING_APP_ID }} + private-key: ${{ secrets.STALE_LOCKING_APP_PRIVATE_KEY }} + + # Ref: https://github.com/dessant/lock-threads#usage + - uses: dessant/lock-threads@v5 + with: + github-token: '${{ steps.generate-token.outputs.token }}' + process-only: 'issues, prs' + issue-inactive-days: '${{env.CLOSED_DAYS}}' + pr-inactive-days: '${{env.CLOSED_DAYS}}' + add-issue-labels: '${{env.LOCKED_LABEL}}' + add-pr-labels: '${{env.LOCKED_LABEL}}' + pr-lock-reason: 'resolved' + log-output: true + - if: failure() + name: Send job failure notification e-mail + uses: dawidd6/action-send-mail@v3.12.0 + with: + server_address: ${{secrets.ACTION_MAIL_SERVER}} + server_port: 465 + username: ${{secrets.ACTION_MAIL_USERNAME}} + password: ${{secrets.ACTION_MAIL_PASSWORD}} + subject: Github workflow error on ${{github.repository}} + to: podman-monitor@lists.podman.io + from: ${{secrets.ACTION_MAIL_SENDER}} + body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}" diff --git a/.github/workflows/mac-pkg.yml b/.github/workflows/mac-pkg.yml index 6fdb681052..34ff93e4c0 100644 --- a/.github/workflows/mac-pkg.yml +++ b/.github/workflows/mac-pkg.yml @@ -67,6 +67,7 @@ jobs: URI="https://github.com/containers/podman/releases/download/${{steps.getversion.outputs.version}}" ARM_FILE="podman-installer-macos-arm64.pkg" AMD_FILE="podman-installer-macos-amd64.pkg" + UNIVERSAL_FILE="podman-installer-macos-universal.pkg" status=$(curl -s -o /dev/null -w "%{http_code}" "${URI}/${ARM_FILE}") if [[ "$status" == "404" ]] ; then @@ -83,12 +84,21 @@ jobs: echo "::warning::AMD installer already exists, skipping" echo "buildamd=false" >> $GITHUB_OUTPUT fi + + status=$(curl -s -o /dev/null -w "%{http_code}" "${URI}/${UNIVERSAL_FILE}") + if [[ "$status" == "404" ]] ; then + echo "builduniversal=true" >> $GITHUB_OUTPUT + else + echo "::warning::Universal installer already exists, skipping" + echo "builduniversal=false" >> $GITHUB_OUTPUT + fi - name: Checkout Version if: >- steps.check.outputs.buildamd == 'true' || steps.check.outputs.buildarm == 'true' || + steps.check.outputs.builduniversal == 'true' || steps.actual_dryrun.outputs.dryrun == 'true' - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@v4 with: ref: ${{steps.getversion.outputs.version}} - name: Set up Go @@ -96,6 +106,7 @@ jobs: if: >- steps.check.outputs.buildamd == 'true' || steps.check.outputs.buildarm == 'true' || + steps.check.outputs.builduniversal == 'true' || steps.actual_dryrun.outputs.dryrun == 'true' uses: actions/setup-go@v5 with: @@ -104,6 +115,7 @@ jobs: if: >- steps.check.outputs.buildamd == 'true' || steps.check.outputs.buildarm == 'true' || + steps.check.outputs.builduniversal == 'true' || steps.actual_dryrun.outputs.dryrun == 'true' run: | echo $APPLICATION_CERTIFICATE | base64 --decode -o appcert.p12 @@ -129,10 +141,17 @@ jobs: run: | make ARCH=amd64 notarize &> /dev/null cd out && 
shasum -a 256 podman-installer-macos-amd64.pkg >> shasums + - name: Build and Sign Universal + if: steps.check.outputs.builduniversal == 'true' || steps.actual_dryrun.outputs.dryrun == 'true' + working-directory: contrib/pkginstaller + run: | + make ARCH=universal notarize &> /dev/null + cd out && shasum -a 256 podman-installer-macos-universal.pkg >> shasums - name: Artifact if: >- steps.check.outputs.buildamd == 'true' || steps.check.outputs.buildarm == 'true' || + steps.check.outputs.builduniversal == 'true' || steps.actual_dryrun.outputs.dryrun == 'true' uses: actions/upload-artifact@v4 with: @@ -144,7 +163,8 @@ jobs: if: >- steps.actual_dryrun.outputs.dryrun == 'false' && (steps.check.outputs.buildamd == 'true' || - steps.check.outputs.buildarm == 'true') + steps.check.outputs.buildarm == 'true' || + steps.check.outputs.builduniversal == 'true' ) env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml new file mode 100644 index 0000000000..c34db23384 --- /dev/null +++ b/.github/workflows/release-artifacts.yml @@ -0,0 +1,262 @@ +name: Upload Release Artifacts + +on: + release: + types: [published] + workflow_dispatch: + inputs: + version: + description: 'Release version to build and upload (e.g. "v9.8.7")' + required: true + dryrun: + description: 'Perform all the steps except uploading to the release page' + required: true + default: "true" # 'choice' type requires string value + type: choice + options: + - "true" # Must be quoted string, boolean value not supported. + - "false" + +permissions: + contents: write + actions: write + +jobs: + build: + runs-on: ubuntu-22.04 + steps: + # If the job fails, these details are all but impossible to observe. + - name: Provide github event JSON for examination + run: | + echo "::group::Event JSON" + jq --color-output "." "${{ github.event_path }}" + echo "::endgroup::" + + - name: Determine Version + id: getversion + run: | + if [[ -z "${{ inputs.version }}" ]] + then + VERSION=${{ github.event.release.tag_name }} + else + VERSION=${{ inputs.version }} + fi + + if ! grep -Eq 'v[0-9]+(\.[0-9]+(\.[0-9]+(-.+)?)?)?$' <<<"$VERSION" + then + echo "Unable to parse release version '$VERSION' from github event JSON, or workflow 'version' input." + exit 1 + fi + + if grep -Eq '.+-dev$' <<<"$VERSION" + then + echo "Refusing to process a "-dev" version '$VERSION'" + exit 1 + fi + + echo + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Consolidate dryrun setting to always be true or false + id: actual_dryrun + run: | + # The 'release' trigger will not have a 'dryrun' input set. Handle + # this case in a readable/maintainable way. + if [[ -z "${{ inputs.dryrun }}" ]] + then + echo "dryrun=false" >> $GITHUB_OUTPUT + else + echo "dryrun=${{ inputs.dryrun }}" >> $GITHUB_OUTPUT + fi + + - name: Dry Run Status + run: | + echo "::notice::This workflow execution will be a dry-run: ${{ steps.actual_dryrun.outputs.dryrun }}" + + - name: Check uploads + id: check + run: | + URI="https://github.com/containers/podman/releases/download/${{steps.getversion.outputs.version}}" + for artifact in "podman-remote-release-darwin_amd64.zip darwin_amd" \ + 'podman-remote-release-darwin_arm64.zip darwin_arm' \ + 'podman-remote-release-windows_amd64.zip windows_amd' \ + 'podman-remote-static-linux_amd64.tar.gz linux_amd' \ + 'podman-remote-static-linux_arm64.tar.gz linux_arm' + do + set -- $artifact # Convert the "tuple" into the param args $1 $2...
+ status=$(curl -s -o /dev/null -w "%{http_code}" "${URI}/${1:?}") + if [[ "$status" == "404" ]] ; then + echo "${1:?} will be built" + needsbuild=true + echo "${2:?}=true" >> $GITHUB_OUTPUT + else + echo "::warning::${1:?} already exists, skipping" + fi + done + + if [ "$needsbuild" = true ]; then + echo "buildartifacts=true" >> $GITHUB_OUTPUT + else + echo "No new artifacts need to be built." + fi + + - name: Checkout Version + if: >- + steps.check.outputs.buildartifacts == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + uses: actions/checkout@v4 + with: + ref: ${{steps.getversion.outputs.version}} + + - name: Set up Go + if: >- + steps.check.outputs.buildartifacts == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + uses: actions/setup-go@v5 + with: + go-version: stable + + - name: Setup artifact directory + if: >- + steps.check.outputs.buildartifacts == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: mkdir -p release/ + + - name: Build Darwin AMD + if: >- + steps.check.outputs.darwin_amd == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + make podman-remote-release-darwin_amd64.zip + mv podman-remote-release-darwin_amd64.zip release/ + + - name: Build Darwin ARM + if: >- + steps.check.outputs.darwin_arm == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + make podman-remote-release-darwin_arm64.zip + mv podman-remote-release-darwin_arm64.zip release/ + + - name: Build Linux AMD + if: >- + steps.check.outputs.linux_amd == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + make podman-remote-static-linux_amd64 + tar -cvzf podman-remote-static-linux_amd64.tar.gz bin/podman-remote-static-linux_amd64 + mv podman-remote-static-linux_amd64.tar.gz release/ + + - name: Build Linux ARM + if: >- + steps.check.outputs.linux_arm == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + make podman-remote-static-linux_arm64 + tar -cvzf podman-remote-static-linux_arm64.tar.gz bin/podman-remote-static-linux_arm64 + mv podman-remote-static-linux_arm64.tar.gz release/ + + - name: Build Windows AMD + if: >- + steps.check.outputs.windows_amd == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + sudo apt-get install -y pandoc + make podman-remote-release-windows_amd64.zip + mv podman-remote-release-windows_amd64.zip release/ + + - name: shasums + if: >- + steps.check.outputs.buildartifacts == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + run: | + pushd release + sha256sum *.zip *.tar.gz > shasums + popd + + - name: Upload to Actions as artifact + if: >- + steps.check.outputs.buildartifacts == 'true' || + steps.actual_dryrun.outputs.dryrun == 'true' + uses: actions/upload-artifact@v4 + with: + name: artifacts + path: | + release/* + + - name: Upload to Release + id: upload + if: >- + steps.check.outputs.buildartifacts == 'true' && + steps.actual_dryrun.outputs.dryrun == 'false' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + (gh release download ${{steps.getversion.outputs.version}} -p "shasums" || exit 0) + cat release/shasums >> shasums + gh release upload ${{steps.getversion.outputs.version}} release/*.zip release/*.tar.gz + gh release upload ${{steps.getversion.outputs.version}} --clobber shasums + + # WARNING: This should only be set when 'notification' job should be triggered + echo "complete=true" >> $GITHUB_OUTPUT + + - name: Trigger Windows Installer + if: >- + steps.check.outputs.windows_amd == 'true' || + steps.actual_dryrun.outputs.dryrun == 'false' + 
env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh workflow run upload-win-installer.yml -f version=${{steps.getversion.outputs.version}} -f dryrun=false + + outputs: + uploaded: ${{ steps.upload.outputs.complete }} + version: ${{ steps.getversion.outputs.version }} + + notification: + if: needs.build.outputs.uploaded == 'true' + runs-on: ubuntu-22.04 + needs: build + steps: + - name: Format release email + id: format + env: + VERSION: ${{ needs.build.outputs.version }} + run: | + if grep -Eq '.+-rc' <<<"$VERSION" + then + RC_PREFIX="candidate " + fi + + echo "mail_subj=Podman ${RC_PREFIX}${VERSION} Released" >> $GITHUB_OUTPUT + + cat <<EOF >email_body.txt + Hi all, + + Podman ${RC_PREFIX}${VERSION} is now available. You may view the full details at + https://github.com/${{ github.repository }}/releases/tag/$VERSION + + Release ${RC_PREFIX}Notes: + -------------- + EOF + + echo ${{ secrets.GITHUB_TOKEN }} | gh auth login --with-token + gh release view $VERSION \ + --repo ${{ github.repository }} --json=body --jq '.body' >> email_body.txt + + # If job fails, permit operator to observe contents in case helpful. + - name: Provide release e-mail contents for examination + run: cat email_body.txt + + - name: Send release notification e-mail + # Ref: https://github.com/dawidd6/action-send-mail + uses: dawidd6/action-send-mail@v3.12.0 + with: + server_address: ${{secrets.ACTION_MAIL_SERVER}} + server_port: 465 + username: ${{secrets.ACTION_MAIL_USERNAME}} + password: ${{secrets.ACTION_MAIL_PASSWORD}} + subject: ${{ steps.format.outputs.mail_subj }} + to: Podman List + from: ${{secrets.ACTION_MAIL_SENDER}} + body: file://./email_body.txt diff --git a/.github/workflows/rerun_cirrus_cron.yml b/.github/workflows/rerun_cirrus_cron.yml index 785c47c499..7ab05d3a78 100644 --- a/.github/workflows/rerun_cirrus_cron.yml +++ b/.github/workflows/rerun_cirrus_cron.yml @@ -42,7 +42,7 @@ jobs: cron_rerun: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + - uses: actions/checkout@v4 with: # All scripts used by this workflow live in podman repo. repository: "containers/podman" @@ -66,7 +66,7 @@ jobs: - if: failure() name: Send error notification e-mail - uses: dawidd6/action-send-mail@v3.11.0 + uses: dawidd6/action-send-mail@v3.12.0 with: server_address: ${{secrets.ACTION_MAIL_SERVER}} server_port: 465 diff --git a/.github/workflows/update-podmanio.yml b/.github/workflows/update-podmanio.yml new file mode 100644 index 0000000000..84aacccddf --- /dev/null +++ b/.github/workflows/update-podmanio.yml @@ -0,0 +1,121 @@ +name: Update Podman version on Podman.io + +on: + release: + types: [published] + workflow_dispatch: + inputs: + version: + description: 'Release version to build and upload (e.g. "v9.8.7")' + required: true + +jobs: + bump: + name: Bump + runs-on: ubuntu-22.04 + steps: + - name: Get version + id: getversion + run: | + + if [[ -z "${{ inputs.version }}" ]] + then + VERSION=${{ github.event.release.tag_name }} + else + VERSION=${{ inputs.version }} + fi + + # strip out the prefix v if it's there + if [[ $VERSION == v* ]]; then + VERSION="${VERSION:1}" + fi + echo "Bump to ${VERSION}" + + if [[ $VERSION != *-rc* ]] && [[ $VERSION != *-dev ]]; then + echo "notRC=true" >> "$GITHUB_OUTPUT" + else + echo "SKIPPING: Version is a RC or a dev, no need to update."
fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Check open PRs + if: steps.getversion.outputs.notRC == 'true' + id: checkpr + run: | + prs=$(gh pr list \ + --repo containers/podman.io \ + --head bump-podmanv${{ steps.getversion.outputs.version }} \ + --state open \ + --json title \ + --jq 'length') + if ((prs > 0)); then + echo "SKIPPING: PR already exists to update to v${{ steps.getversion.outputs.version }}." + else + echo "prexists=false" >> "$GITHUB_OUTPUT" + fi + env: + GH_TOKEN: ${{ secrets.PODMANBOT_TOKEN }} + + - uses: actions/checkout@v4 + if: >- + steps.getversion.outputs.notRC == 'true' && + steps.checkpr.outputs.prexists == 'false' + with: + repository: containers/podman.io + ref: refs/heads/main + token: ${{ secrets.PODMANBOT_TOKEN }} + + - name: Check version + if: >- + steps.getversion.outputs.notRC == 'true' && + steps.checkpr.outputs.prexists == 'false' + id: checkversion + run: | + # Check if version is actually higher than one on podman.io + prevversion=`grep -P "(?<=export const LATEST_VERSION = ')(\d.\d.\d)" -o static/data/global.ts` + echo "Version currently on site: ${prevversion}" + echo "Version to update to: ${{ steps.getversion.outputs.version }}" + # sort -V -C returns 0 if args are ascending version order + if echo "${prevversion},${{ steps.getversion.outputs.version }}" | tr ',' '\n' | sort -V -C && [[ ${prevversion} != ${{ steps.getversion.outputs.version }} ]] + then + echo "needsUpdate=true" >> $GITHUB_OUTPUT + echo "This release is a higher version, so we need to update podman.io" + else + echo "SKIPPING: This release is not a higher version, no need to update." + fi + + - name: Bump version + if: >- + steps.getversion.outputs.notRC == 'true' && + steps.checkversion.outputs.needsUpdate == 'true' && + steps.checkpr.outputs.prexists == 'false' + run: | + # Replace the version in static/data/global.ts file + sed -i "s/export const LATEST_VERSION = '.*';/export const LATEST_VERSION = '${{ steps.getversion.outputs.version }}';/g" static/data/global.ts + echo "Updated file:" + cat static/data/global.ts + + - name: Open PR + if: >- + steps.getversion.outputs.notRC == 'true' && + steps.checkversion.outputs.needsUpdate == 'true' && + steps.checkpr.outputs.prexists == 'false' + run: | + # Make the committer the user who triggered the action, either through cutting a release or manual trigger + # GitHub gives everyone a noreply email associated with their account, use that email for the sign-off + git config --local user.name ${{ github.actor }} + git config --local user.email "${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com" + bumpbranch="bump-podmanv${{ steps.getversion.outputs.version }}" + git checkout -b $bumpbranch + git add static/data/global.ts + git commit --signoff -m "Bump Podman to v${{ steps.getversion.outputs.version }}" + git remote -v + git remote add podmanbot https://github.com/podmanbot/podman.io + git push podmanbot "+$bumpbranch" + gh pr create \ + --title "Bump Podman to v${{ steps.getversion.outputs.version }}" \ + --body "Bump Podman to v${{ steps.getversion.outputs.version }}" \ + --head "podmanbot:$bumpbranch" \ + --base "main" -R "containers/podman.io" + env: + GH_TOKEN: ${{ secrets.PODMANBOT_TOKEN }} diff --git a/.github/workflows/upload-win-installer.yml b/.github/workflows/upload-win-installer.yml index b7cc0eacc5..c818c6a826 100644 --- a/.github/workflows/upload-win-installer.yml +++ b/.github/workflows/upload-win-installer.yml @@ -1,8 +1,6 @@ name: Upload Windows Installer on: - release: - types: [created,
published, edited] workflow_dispatch: inputs: version: @@ -52,9 +50,10 @@ jobs: } } Write-Output "version=$version" | Out-File -FilePath $env:GITHUB_OUTPUT -Append - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - ref: ${{steps.getversion.outputs.version}} + # Note this purposefully checks out the same branch the action runs in, as the + # installer build script is designed to support older releases (uses the archives + # on the release tag). + - uses: actions/checkout@v4 # This step is super-duper critical for the built/signed windows installer .exe file. # It ensures the referenced $version github release page does NOT already contain # this file. Windows assigns a UUID to the installer at build time, it's assumed @@ -82,7 +81,7 @@ jobs: - name: Confirm upload_asset_name is non-empty if: ${{ steps.check.outputs.upload_asset_name == '' }} run: | - Write-Output "::error::check.ps1 script failed to find manually uploaded podman-remote-release-windows_md64.zip github release asset for version ${{steps.getversion.outputs.version}}." + Write-Output "::error::check.ps1 script failed to find manually uploaded podman-remote-release-windows_amd64.zip github release asset for version ${{steps.getversion.outputs.version}}." Exit 1 - name: Set up Go uses: actions/setup-go@v5 diff --git a/.gitignore b/.gitignore index 5e440f1d99..7ea6620913 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,13 @@ result # Necessary to prevent hack/tree-status.sh false-positive /*runner_stats.log .generate-bindings +contrib/win-installer/artifacts/ +contrib/win-installer/docs/ +contrib/win-installer/fetch/ +contrib/win-installer/podman.msi +contrib/win-installer/podman-*setup.exe +contrib/win-installer/engine.exe +contrib/win-installer/shasums +contrib/win-installer/pages.wxs +contrib/win-installer/*.wixobj +contrib/win-installer/*.wixpdb diff --git a/.golangci.yml b/.golangci.yml index c095e4519e..33bf0c5bf6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,13 +1,7 @@ --- run: concurrency: 6 - deadline: 5m - skip-dirs-use-default: true - skip-dirs: - - contrib - - dependencies - skip-files: - - swagger.go + timeout: 5m modules-download-mode: readonly linters: enable-all: true @@ -22,12 +16,10 @@ linters: - nakedret - gosmopolitan # usage of time.Local in pkg/k8s.io - tagliatelle # too many JSON keys cannot be changed due to compat - - nosnakecase # too many false positives due to the `unix` package - dupword # too many false positives (e.g., in tests) - gocognit - testpackage - - goerr113 - - exhaustivestruct + - err113 - errorlint - wrapcheck - paralleltest @@ -40,7 +32,6 @@ linters: - nestif - predeclared - thelper - - ifshort - forbidigo - exhaustive - gofumpt @@ -54,12 +45,13 @@ linters: - gocyclo - lll - gosec - - maligned - musttag # way to many warnings to fix for now, also some false positives + - mnd # way to many false positives - gomoddirectives - containedctx - contextcheck - cyclop + - canonicalheader # our current header values are fixed and should not be changed - errname - forcetypeassert - ireturn @@ -69,18 +61,11 @@ linters: - nonamedreturns - exhaustruct # deprecated linters - - golint # replaced by revive - - scopelint # replaced by exportloopref - - interfacer - - deadcode # deprecated since v1.49.0, replaced by unused - - structcheck # deprecated since v1.49.0, replaced by unused - - varcheck # deprecated since v1.49.0, replaced by unused + - execinquery linters-settings: errcheck: check-blank: false - ignore: fmt:.* nolintlint: - allow-leading-space: false 
allow-unused: true require-specific: true revive: @@ -97,3 +82,9 @@ issues: # Set to 0 to disable. # Default: 3 max-same-issues: 0 + exclude-dirs-use-default: true + exclude-dirs: + - contrib + - dependencies + exclude-files: + - swagger.go diff --git a/.packit.yaml b/.packit.yaml index afcebd10e7..77833712bb 100644 --- a/.packit.yaml +++ b/.packit.yaml @@ -2,9 +2,19 @@ # See the documentation for more information: # https://packit.dev/docs/configuration/ -specfile_path: rpm/podman.spec +downstream_package_name: podman upstream_tag_template: v{version} +packages: + podman-fedora: + pkg_tool: fedpkg + specfile_path: rpm/podman.spec + podman-centos: + pkg_tool: centpkg + specfile_path: rpm/podman.spec + podman-rhel: + specfile_path: rpm/podman.spec + srpm_build_deps: - git-archive-all - make @@ -16,25 +26,45 @@ actions: jobs: - job: copr_build trigger: pull_request - notifications: + packages: [podman-fedora] + notifications: &packit_build_failure_notification failure_comment: message: "Ephemeral COPR build failed. @containers/packit-build please check." enable_net: true targets: - - fedora-all-x86_64 - - fedora-all-aarch64 - - fedora-eln-x86_64 - - fedora-eln-aarch64 - - centos-stream+epel-next-8-x86_64 - - centos-stream+epel-next-8-aarch64 - - centos-stream+epel-next-9-x86_64 - - centos-stream+epel-next-9-aarch64 - additional_repos: - - "copr://rhcontainerbot/podman-next" + fedora-all-x86_64: {} + fedora-all-aarch64: {} + fedora-eln-x86_64: + additional_repos: + - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/" + fedora-eln-aarch64: + additional_repos: + - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/" + + - job: copr_build + trigger: pull_request + packages: [podman-centos] + notifications: *packit_build_failure_notification + enable_net: true + targets: + - centos-stream-9-x86_64 + - centos-stream-9-aarch64 + - centos-stream-10-x86_64 + - centos-stream-10-aarch64 + + - job: copr_build + trigger: pull_request + packages: [podman-rhel] + notifications: *packit_build_failure_notification + enable_net: true + targets: + - epel-9-x86_64 + - epel-9-aarch64 # Run on commit to main branch - job: copr_build trigger: commit + packages: [podman-fedora] notifications: failure_comment: message: "podman-next COPR build failed. @containers/packit-build please check." @@ -46,6 +76,7 @@ jobs: - job: tests identifier: cockpit-revdeps trigger: pull_request + packages: [podman-fedora] notifications: failure_comment: message: "Cockpit tests failed for commit {commit_sha}. @martinpitt, @jelly, @mvollmer please check." @@ -66,16 +97,26 @@ jobs: - job: propose_downstream trigger: release update_release: false + packages: [podman-fedora] + dist_git_branches: + - fedora-development + - fedora-latest + + - job: propose_downstream + trigger: release + update_release: false + packages: [podman-centos] dist_git_branches: - - fedora-development # Implies fedora-rawhide and any branched but unreleased version, will include f40 before f40 is marked stable. 
+ - c10s - job: koji_build trigger: commit + packages: [podman-fedora] dist_git_branches: - - fedora-development + - fedora-all - # TODO: Revisit once fedora 40 is branched and manual bodhi is enabled - #- job: bodhi_update - #trigger: commit - #dist_git_branches: - #- fedora-40 # rawhide updates are created automatically + - job: bodhi_update + trigger: commit + packages: [podman-fedora] + dist_git_branches: + - fedora-branched # rawhide updates are created automatically diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7e75e4f6ea..936b0b14d0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,13 +4,15 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks.git rev: v3.4.0 hooks: - # buildah-tests.diff is generated by 'git format-patch' and includes + # `buildah-tests.diff` is generated by 'git format-patch' and includes # trailing whitespace as part of its format. We can work around that, # but unfortunately the buildah repo has some files with tabs, which # git-diff formats as '[+/-]', which these hooks choke on. - # Just disable checks on this diff file as a special case. + # `contrib/systemd/user` is a symlink but for some reason, on windows, + # pre-commit consider it as a regular file and tries to fix it. + # Just disable checks on these files as a special case. - id: end-of-file-fixer - exclude: test/buildah-bud/buildah-tests.diff + exclude: test/buildah-bud/buildah-tests.diff|contrib/systemd/user - id: trailing-whitespace exclude: test/buildah-bud/buildah-tests.diff|test/e2e/quadlet/remap-keep-id2.container|test/e2e/quadlet/line-continuation-whitespace.container - id: mixed-line-ending diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b1d02cea12..e3443d1997 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -9,7 +9,7 @@ that we follow. * [Reporting Issues](#reporting-issues) * [Working On Issues](#working-on-issues) * [Contributing to Podman](#contributing-to-podman) -* [Continuous Integration](#continuous-integration) [![Build Status](https://api.cirrus-ci.com/github/containers/podman.svg)](https://cirrus-ci.com/github/containers/podman/master) +* [Continuous Integration](#continuous-integration) [![Build Status](https://api.cirrus-ci.com/github/containers/podman.svg)](https://cirrus-ci.com/github/containers/podman/main) * [Submitting Pull Requests](#submitting-pull-requests) * [Communications](#communications) @@ -94,24 +94,6 @@ Makefile allow you to install needed tools: $ make install.tools ``` -### Prerequisite before build - -You need install some dependencies before building a binary. - -#### Fedora - - ```shell - $ sudo dnf install gpgme-devel libseccomp-devel.x86_64 systemd-devel - $ export PKG_CONFIG_PATH="/usr/lib/pkgconfig" - ``` - -#### Debian / Ubuntu - - ```shell - $ sudo apt-get install -y libsystemd-dev libgpgme-dev libseccomp-dev - $ export PKG_CONFIG_PATH="/usr/lib/pkgconfig" - ``` - ### Building binaries and test your changes To test your changes do `make binaries` to generate your binaries. @@ -164,9 +146,9 @@ Regardless of the type of PR, all PRs should include: * well documented code changes. * additional testcases. Ideally, they should fail w/o your code change applied. (With a few exceptions, CI hooks will block your PR unless your change - includes files named `*_test.go` or under the `test/` subdirectory. To - bypass this block, include the string `[NO NEW TESTS NEEDED]` in your - commit message). + includes files named `*_test.go` or under the `test/` subdirectory. 
Repo + admins may bypass this restriction by setting the 'No New Tests' GitHub + label on the PR). * documentation changes. Squash your commits into logical pieces of work that might want to be reviewed @@ -309,13 +291,7 @@ commit automatically with `git commit -s`. ### Go Format and lint -All code changes must pass ``make validate`` and ``make lint``. - -``` -podman build -t gate -f contrib/gate/Dockerfile . -``` - -***N/B:*** **don't miss the dot (.) at the end, it's really important** +All code changes must pass ``make validatepr``. ### Integration Tests @@ -340,7 +316,7 @@ All pull requests and branch-merges automatically run: There is always additional complexity added by automation, and so it sometimes can fail for any number of reasons. This includes post-merge testing on all branches, which you may occasionally see [red bars on the status graph -.](https://cirrus-ci.com/github/containers/podman/master) +.](https://cirrus-ci.com/github/containers/podman/main) When the graph shows mostly green bars on the right, it's a good indication the main branch is currently stable. Alternating red/green bars is indicative diff --git a/DOWNLOADS.md b/DOWNLOADS.md index 01b47e777a..def26314cc 100644 --- a/DOWNLOADS.md +++ b/DOWNLOADS.md @@ -40,16 +40,17 @@ matches corresponding changes in the artifacts task. , and [rootlessport](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/rootlessport) - Built on the latest supported Fedora release. -* MacOS, - [both x86_64](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-installer-macos-amd64.pkg) - and - [aarch64 (ARM)](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-installer-macos-aarch64.pkg) - installation packages. Again, these are **not** signed, so expect warnings. There's - also binary release *ZIP-files* for +* MacOS + [universal](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-installer-macos-universal.pkg) + , + [x86_64](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-installer-macos-amd64.pkg) + , and + [arm64](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-installer-macos-arm64.pkg) + installation packages. Again, these are **not** signed, so expect warnings if you try to install them. + There's also binary release *ZIP-files* for [darwin_amd64](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-remote-release-darwin_amd64.zip) and [darwin_arm64](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-remote-release-darwin_arm64.zip). - if you try to install them. * Windows [podman-remote](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-remote-release-windows_amd64.zip) for x86_64 only. 
* Other podman-remote release builds (includes configuration files & documentation): * [podman-release-386.tar.gz](https://api.cirrus-ci.com/v1/artifact/github/containers/podman/Artifacts/binary/podman-release-386.tar.gz) diff --git a/Makefile b/Makefile index 6b320c1aa7..49236abd3d 100644 --- a/Makefile +++ b/Makefile @@ -57,10 +57,12 @@ BUILDTAGS ?= \ $(shell hack/libsubid_tag.sh) \ exclude_graphdriver_devicemapper \ seccomp +# allow downstreams to easily add build tags while keeping our defaults +BUILDTAGS += ${EXTRA_BUILDTAGS} # N/B: This value is managed by Renovate, manual changes are # possible, as long as they don't disturb the formatting # (i.e. DO NOT ADD A 'v' prefix!) -GOLANGCI_LINT_VERSION := 1.56.2 +GOLANGCI_LINT_VERSION := 1.59.1 PYTHON ?= $(shell command -v python3 python|head -n1) PKG_MANAGER ?= $(shell command -v dnf yum|head -n1) # ~/.local/bin is not in PATH on all systems @@ -69,6 +71,8 @@ ifeq ($(shell uname -s),FreeBSD) SED=gsed GREP=ggrep MAN_L= mandoc +# FreeBSD needs CNI until netavark is supported +BUILDTAGS += cni else SED=sed GREP=grep @@ -101,6 +105,7 @@ FISHINSTALLDIR=${PREFIX}/share/fish/vendor_completions.d SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z) + COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true) GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),$(call err_if_empty,COMMIT_NO)-dirty,$(COMMIT_NO)) DATE_FMT = %s @@ -164,6 +169,7 @@ CROSS_BUILD_TARGETS := \ bin/podman.cross.linux.arm64 \ bin/podman.cross.linux.386 \ bin/podman.cross.linux.s390x \ + bin/podman.cross.linux.loong64 \ bin/podman.cross.linux.mips \ bin/podman.cross.linux.mipsle \ bin/podman.cross.linux.mips64 \ @@ -232,11 +238,11 @@ binaries: podman podman-remote ## Build podman and podman-remote binaries else ifneq (, $(findstring $(GOOS),darwin windows)) binaries: podman-remote ## Build podman-remote (client) only binaries else -binaries: podman podman-remote podmansh rootlessport quadlet ## Build podman, podman-remote and rootlessport binaries quadlet +binaries: podman podman-remote podman-testing podmansh rootlessport quadlet ## Build podman, podman-remote and rootlessport binaries quadlet endif # Extract text following double-# for targets, as their description for -# the `help` target. Otherwise These simple-substitutions are resolved +# the `help` target. Otherwise these simple-substitutions are resolved # at reference-time (due to `=` and not `=:`). _HLP_TGTS_RX = '^[[:print:]]+:.*?\#\# .*$$' _HLP_TGTS_CMD = $(GREP) -E $(_HLP_TGTS_RX) $(MAKEFILE_LIST) @@ -270,7 +276,6 @@ help: ## (Default) Print listing of key targets with their descriptions .PHONY: lint lint: golangci-lint - @echo "Linting vs commit '$(call err_if_empty,EPOCH_TEST_COMMIT)'" ifeq ($(PRE_COMMIT),) @echo "FATAL: pre-commit was not found, make .install.pre-commit to installing it." 
>&2 @exit 2 @@ -306,8 +311,33 @@ test/version/version: version/version.go codespell: codespell -S bin,vendor,.git,go.sum,.cirrus.yml,"*.fish,RELEASE_NOTES.md,*.xz,*.gz,*.ps1,*.tar,swagger.yaml,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L secon,passt,bu,hastable,te,clos,ans,pullrequest,uint,iff,od,seeked,splitted,marge,erro,hist,ether,specif -w +# Code validation target that **DOES NOT** require building podman binaries +.PHONY: validate-source +validate-source: lint .gitvalidation swagger-check tests-expect-exit pr-removes-fixed-skips + +# Code validation target that **DOES** require building podman binaries +.PHONY: validate-binaries +validate-binaries: man-page-check validate.completions + .PHONY: validate -validate: lint .gitvalidation validate.completions man-page-check swagger-check tests-included tests-expect-exit pr-removes-fixed-skips +validate: validate-source validate-binaries + +# The image used below is generated manually from contrib/validatepr/Containerfile in this podman repo. The builds are +# not automated right now. The hope is that eventually the quay.io/libpod/fedora_podman is multiarch and can replace this +# image in the future. +.PHONY: validatepr +validatepr: + $(PODMANCMD) run --rm \ + -v $(CURDIR):/go/src/github.com/containers/podman \ + --security-opt label=disable \ + -it \ + -w /go/src/github.com/containers/podman \ + quay.io/libpod/validatepr:latest \ + make .validatepr + +.PHONY: .validatepr +.validatepr: + env BUILDTAGS="$(BUILDTAGS)" REMOTETAGS="$(REMOTETAGS)" contrib/validatepr/validatepr.sh .PHONY: build-all-new-commits build-all-new-commits: @@ -332,7 +362,7 @@ $(IN_CONTAINER): %-in-container: $(PODMANCMD) run --rm --env HOME=/root \ -v $(CURDIR):/src -w /src \ --security-opt label=disable \ - docker.io/library/golang:1.20 \ + docker.io/library/golang:1.22 \ make $(*) @@ -352,6 +382,7 @@ endif $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \ -tags "$(BUILDTAGS)" \ -o $@ ./cmd/podman + test -z "${SELINUXOPT}" || chcon -t container_runtime_exec_t $@ # Disambiguate Linux vs Darwin/Windows platform binaries under distinct "bin" dirs $(SRCBINDIR): @@ -365,10 +396,10 @@ $(SRCBINDIR)/podman$(BINSFX): $(SOURCES) go.mod go.sum | $(SRCBINDIR) -tags "${REMOTETAGS}" \ -o $@ ./cmd/podman -$(SRCBINDIR)/podman-remote-static-linux_amd64 $(SRCBINDIR)/podman-remote-static-linux_arm64: $(SRCBINDIR)/podman-remote-static-linux_%: $(SRCBINDIR) $(SOURCES) go.mod go.sum +$(SRCBINDIR)/podman-remote-static-linux_%: GOARCH = $(patsubst $(SRCBINDIR)/podman-remote-static-linux_%,%,$@) +$(SRCBINDIR)/podman-remote-static-linux_%: GOOS = linux +$(SRCBINDIR)/podman-remote-static $(SRCBINDIR)/podman-remote-static-linux_amd64 $(SRCBINDIR)/podman-remote-static-linux_arm64: $(SRCBINDIR) $(SOURCES) go.mod go.sum CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=$* \ $(GO) build \ $(BUILDFLAGS) \ $(GO_LDFLAGS) '$(LDFLAGS_PODMAN_STATIC)' \ @@ -392,7 +423,8 @@ $(SRCBINDIR)/quadlet: $(SOURCES) go.mod go.sum .PHONY: quadlet quadlet: bin/quadlet -PHONY: podman-remote-static-linux_amd64 podman-remote-static-linux_arm64 +.PHONY: podman-remote-static podman-remote-static-linux_amd64 podman-remote-static-linux_arm64 +podman-remote-static: $(SRCBINDIR)/podman-remote-static podman-remote-static-linux_amd64: $(SRCBINDIR)/podman-remote-static-linux_amd64 podman-remote-static-linux_arm64: $(SRCBINDIR)/podman-remote-static-linux_arm64 @@ -431,6 +463,16 @@ rootlessport: bin/rootlessport podmansh: bin/podman if [ ! 
-f bin/podmansh ]; then ln -s podman bin/podmansh; fi +$(SRCBINDIR)/podman-testing: $(SOURCES) go.mod go.sum + $(GOCMD) build \ + $(BUILDFLAGS) \ + $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \ + -tags "${BUILDTAGS}" \ + -o $@ ./cmd/podman-testing + +.PHONY: podman-testing +podman-testing: bin/podman-testing + ### ### Secondary binary-build targets ### @@ -475,12 +517,13 @@ completions: podman podman-remote ### Documentation targets ### -pkg/api/swagger.yaml: +pkg/api/swagger.yaml: .install.swagger make -C pkg/api $(MANPAGES_MD_GENERATED): %.md: %.md.in $(MANPAGES_SOURCE_DIR)/options/*.md hack/markdown-preprocess +$(MANPAGES): OUTFILE=$(subst source/markdown,build/man,$@) $(MANPAGES): %: %.md .install.md2man docdir # This does a bunch of filtering needed for man pages: @@ -507,12 +550,12 @@ $(MANPAGES): %: %.md .install.md2man docdir -e 's/\[\([^]]*\)](http[^)]\+)/\1/g' \ -e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' \ -e 's/\\$$/ /g' $< |\ - $(GOMD2MAN) -out $(subst source/markdown,build/man,$@) - @if grep 'included file options/' docs/build/man/*; then \ - echo "FATAL: man pages must not contain ^^^^"; exit 1; \ + $(GOMD2MAN) -out $(OUTFILE) + @if grep 'included file options/' $(OUTFILE); then \ + echo "FATAL: man pages must not contain ^^^^ in $(OUTFILE)"; exit 1; \ fi - @if $(MAN_L) $(subst source/markdown,build/man,$@) | $(GREP) -Pazoq '│\s+│\n\s+├─+┼─+┤\n\s+│\s+│'; then \ - echo "FATAL: $< has a too-long table column; use 'man -l $(subst source/markdown,build/man,$@)' and look for empty table cells."; exit 1; \ + @if $(MAN_L) $(OUTFILE)| $(GREP) -Pazoq '│\s+│\n\s+├─+┼─+┤\n\s+│\s+│'; then \ + echo "FATAL: $< has a too-long table column; use 'man -l $(OUTFILE)' and look for empty table cells."; exit 1; \ fi .PHONY: docdir @@ -540,7 +583,7 @@ podman-remote-%-docs: podman-remote $(if $(findstring windows,$*),docs/source/markdown,docs/build/man) .PHONY: man-page-check -man-page-check: bin/podman +man-page-check: bin/podman docs hack/man-page-checker hack/xref-helpmsgs-manpages hack/man-page-table-check @@ -565,7 +608,7 @@ docker-docs: docs .PHONY: validate.completions validate.completions: SHELL:=/usr/bin/env bash # Set shell to bash for this target -validate.completions: +validate.completions: completions # Check if the files can be loaded by the shell . completions/bash/podman if [ -x /bin/zsh ]; then /bin/zsh completions/zsh/_podman; fi @@ -599,22 +642,26 @@ localunit: test/goecho/goecho test/version/version test: localunit localintegration remoteintegration localsystem remotesystem ## Run unit, integration, and system tests. .PHONY: ginkgo-run +# e2e tests need access to podman-registry +ginkgo-run: PATH := $(PATH):$(CURDIR)/hack ginkgo-run: .install.ginkgo $(GINKGO) version $(GINKGO) -vv $(TESTFLAGS) --tags "$(TAGS) remote" $(GINKGOTIMEOUT) --flake-attempts $(GINKGO_FLAKE_ATTEMPTS) \ --trace $(if $(findstring y,$(GINKGO_NO_COLOR)),--no-color,) \ $(GINKGO_JSON) $(if $(findstring y,$(GINKGO_PARALLEL)),-p,) $(if $(FOCUS),--focus "$(FOCUS)",) \ - $(if $(FOCUS_FILE),--focus-file "$(FOCUS_FILE)",) $(GINKGOWHAT) $(HACK) + $(if $(FOCUS_FILE),--focus-file "$(FOCUS_FILE)",) $(GINKGOWHAT) .PHONY: ginkgo ginkgo: - $(MAKE) ginkgo-run TAGS="$(BUILDTAGS)" HACK=hack/. 
+ $(MAKE) ginkgo-run TAGS="$(BUILDTAGS)" .PHONY: ginkgo-remote ginkgo-remote: - $(MAKE) ginkgo-run TAGS="$(REMOTETAGS) remote_testing" HACK= + $(MAKE) ginkgo-run TAGS="$(REMOTETAGS) remote_testing" .PHONY: testbindings +# bindings tests need access to podman-registry +testbindings: PATH := $(PATH):$(CURDIR)/hack testbindings: .install.ginkgo $(GINKGO) -v $(TESTFLAGS) --tags "$(TAGS) remote" $(GINKGOTIMEOUT) --trace --no-color --timeout 30m -v -r ./pkg/bindings/test @@ -626,13 +673,15 @@ remoteintegration: test-binaries ginkgo-remote .PHONY: localmachine localmachine: - $(MAKE) ginkgo-run GINKGO_PARALLEL=n TAGS="$(REMOTETAGS)" GINKGO_FLAKE_ATTEMPTS=0 FOCUS_FILE=$(FOCUS_FILE) GINKGOWHAT=pkg/machine/e2e/. HACK= + # gitCommit needed by logformatter, to link to sources + @echo /define.gitCommit=$(GIT_COMMIT) + $(MAKE) ginkgo-run GINKGO_PARALLEL=n TAGS="$(REMOTETAGS)" GINKGO_FLAKE_ATTEMPTS=0 FOCUS_FILE=$(FOCUS_FILE) GINKGOWHAT=pkg/machine/e2e/. .PHONY: localsystem localsystem: # Wipe existing config, database, and cache: start with clean slate. $(RM) -rf ${HOME}/.local/share/containers ${HOME}/.config/containers - if timeout -v 1 true; then PODMAN=$(CURDIR)/bin/podman QUADLET=$(CURDIR)/bin/quadlet bats test/system/; else echo "Skipping $@: 'timeout -v' unavailable'"; fi + if timeout -v 1 true; then PODMAN=$(CURDIR)/bin/podman QUADLET=$(CURDIR)/bin/quadlet bats -T test/system/; else echo "Skipping $@: 'timeout -v' unavailable'"; fi .PHONY: remotesystem remotesystem: @@ -657,7 +706,7 @@ remotesystem: echo "Error: ./bin/podman system service did not come up on $$SOCK_FILE" >&2;\ exit 1;\ fi;\ - env PODMAN="$(CURDIR)/bin/podman-remote --url $$PODMAN_SOCKET" bats test/system/ ;\ + env PODMAN="$(CURDIR)/bin/podman-remote --url $$PODMAN_SOCKET" bats -T test/system/ ;\ rc=$$?;\ kill %1;\ rm -f $$SOCK_FILE;\ @@ -844,6 +893,11 @@ ifneq ($(shell uname -s),FreeBSD) install ${SELINUXOPT} -m 644 contrib/tmpfile/podman.conf $(DESTDIR)${TMPFILESDIR}/podman.conf endif +.PHONY: install.testing +install.testing: + install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) + install ${SELINUXOPT} -m 755 bin/podman-testing $(DESTDIR)$(BINDIR)/podman-testing + .PHONY: install.modules-load install.modules-load: # This should only be used by distros which might use iptables-legacy, this is not needed on RHEL install ${SELINUXOPT} -m 755 -d $(DESTDIR)${MODULESLOADDIR} @@ -916,6 +970,7 @@ install.systemd: $(PODMAN_UNIT_FILES) install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service $(DESTDIR)${USERSYSTEMDDIR}/podman.service install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-restart.service $(DESTDIR)${USERSYSTEMDDIR}/podman-restart.service install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-kube@.service $(DESTDIR)${USERSYSTEMDDIR}/podman-kube@.service + install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-clean-transient.service $(DESTDIR)${USERSYSTEMDDIR}/podman-clean-transient.service # System services install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.service $(DESTDIR)${SYSTEMDDIR}/podman-auto-update.service install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer $(DESTDIR)${SYSTEMDDIR}/podman-auto-update.timer @@ -951,11 +1006,7 @@ install.tools: .install.golangci-lint ## Install needed tools .PHONY: .install.swagger .install.swagger: - env VERSION=0.30.3 \ - BINDIR=$(BINDIR) \ - GOOS=$(GOOS) \ - GOARCH=$(GOARCH) \ - ./hack/install_swagger.sh + $(MAKE) -C test/tools build/swagger .PHONY: .install.md2man .install.md2man: diff --git a/OWNERS 
b/OWNERS index cf3ad0ae3d..a4374ed18f 100644 --- a/OWNERS +++ b/OWNERS @@ -8,6 +8,7 @@ approvers: - flouthoc - giuseppe - jakecorrenti + - jnovy - jwhonce - lsm5 - mheon @@ -28,7 +29,9 @@ reviewers: - flouthoc - giuseppe - jakecorrenti + - jnovy - jwhonce + - l0rd - lsm5 - mheon - mtrmac diff --git a/README.md b/README.md index 936d72e250..869ab8f298 100644 --- a/README.md +++ b/README.md @@ -77,7 +77,7 @@ A little configuration by an administrator is required before rootless Podman ca [Podman Desktop](https://podman-desktop.io/) provides a local development environment for Podman and Kubernetes on Linux, Windows, and Mac machines. It is a full-featured desktop UI frontend for Podman which uses the `podman machine` backend on non-Linux operating systems to run containers. It supports full container lifecycle management (building, pulling, and pushing images, creating and managing containers, creating and managing pods, and working with Kubernetes YAML). -The project develops on [GitHub](https://github.com/containers/podman-desktop) and contributors are welcome. +The project develops on [GitHub](https://github.com/containers/podman-desktop) and contributions are welcome. ## Out of scope @@ -114,6 +114,9 @@ Documentation on the Podman REST API. A list of the Podman commands with links to their man pages and in many cases videos showing the commands in use. +**[Podman Container Images](https://github.com/containers/image_build/blob/main/podman/README.md)** +Information on the Podman Container Images found on [quay.io](https://quay.io/podman/stable). + **[Podman Troubleshooting Guide](troubleshooting.md)** A list of common issues and solutions for Podman. diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 60414b84be..8ae3591366 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,5 +1,197 @@ # Release Notes +## 5.1.0 +### Features +- VMs created by `podman machine` on macOS with Apple silicon can now use Rosetta 2 (a.k.a Rosetta) for high-speed emulation of x86 code. This is enabled by default. If you wish to change this option, you can do so in `containers.conf`. +- Changes made by the `podman update` command are now persistent, and will survive container restart and be reflected in `podman inspect`. +- The `podman update` command now includes a new option, `--restart`, to update the restart policy of existing containers. +- Quadlet `.container` files now support a new key, `GroupAdd`, to add groups to the container. +- Container annotations are now printed by `podman inspect`. +- Image-based mounts using `podman run --mount type=image,...` now support a new option, `subpath`, to mount only part of the image into the container. +- A new field, `healthcheck_events`, has been added to `containers.conf` under the `[engine]` section to allow users to disable the generation of `health_status` events to avoid spamming logs on systems with many healthchecks. +- A list of images to automatically mount as volumes can now be specified in Kubernetes YAML via the `io.podman.annotations.kube.image.automount/$CTRNAME` annotation (where `$CTRNAME` is the name of the container they will be mounted into). +- The `podman info` command now includes the default rootless network command (`pasta` or `slirp4netns`). +- The `podman ps` command now shows ports from `--expose` that have not been published with `--publish-all` to improve Docker compatibility. +- The `podman container runlabel` command now expands `$HOME` in the label being run to the user's home directory. 
+- A new alias, `podman network list`, has been added to the `podman network ls` command. +- The name and shell of containers created by `podmansh` can now be set in `containers.conf`. +- The `podman-setup.exe` Windows installer now provides 3 new CLI variables, `MachineProvider` (choose the provider for the machine, `windows` or `wsl`, the default), `HyperVCheckbox` (can be set to `1` to install HyperV if it is not already installed or `0`, the default, to not install HyperV), and `SkipConfigFileCreation` (can be set to `1` to disable the creation of configuration files, or `0`, the default). + +### Changes +- Podman now changes volume ownership every time an empty named volume is mounted into a container, not just the first time, matching Docker's behavior. +- When running Kubernetes YAML with `podman kube play` that does not include an `imagePullPolicy` and does not set a tag for the image, the image is now always pulled ([#21211](https://github.com/containers/podman/issues/21211)). +- When running Kubernetes YAML with `podman kube play`, pod-level restart policies are now passed down to individual containers within the pod ([#20903](https://github.com/containers/podman/issues/20903)). +- The `--runroot` global option can now accept paths with lengths longer than 50 characters ([#22272](https://github.com/containers/podman/issues/22272)). +- Updating containers with the `podman update` command now emits an event. + +### Bugfixes +- Fixed a bug where the `--userns=keep-id:uid=0` option to `podman create` and `podman run` would generate incorrect UID mappings and cause the container to fail to start ([#22078](https://github.com/containers/podman/issues/22078)). +- Fixed a bug where `podman stats` could report inaccurate percentages for very large or very small values ([#22064](https://github.com/containers/podman/issues/22064)). +- Fixed a bug where bind-mount volumes defaulted to `rbind` instead of `bind`, meaning recursive mounts were allowed by default ([#22107](https://github.com/containers/podman/issues/22107)). +- Fixed a bug where the `podman machine rm -f` command would fail to remove Hyper-V virtual machines if they were running. +- Fixed a bug where the `podman ps --sync` command could sometimes fail to properly update the status of containers. +- Fixed a bug where bind-mount volumes using the `:idmap` option would sometimes be inaccessible with rootless Podman ([#22228](https://github.com/containers/podman/issues/22228)). +- Fixed a bug where bind-mount volumes using the `:U` option would have their ownership changed to the owner of the directory in the image being mounted over ([#22224](https://github.com/containers/podman/issues/22224)). +- Fixed a bug where removing multiple containers, pods, or images with the `--force` option did not work when multiple arguments were given to the command and one of them did not exist ([#21529](https://github.com/containers/podman/issues/21529)). +- Fixed a bug where Podman did not properly clean up old cached Machine images. +- Fixed a bug where rapidly-restarting containers with healthchecks could sometimes fail to start their healthchecks after restarting. +- Fixed a bug where nested Podman could create its `pause.pid` file in an incorrect directory ([#22327](https://github.com/containers/podman/issues/22327)). +- Fixed a bug where Podman would panic if an OCI runtime was configured without associated paths in `containers.conf` ([#22561](https://github.com/containers/podman/issues/22561)). 
+- Fixed a bug where the `podman kube down` command would not respect the `StopTimeout` and `StopSignal` of containers that it stopped ([#22397](https://github.com/containers/podman/issues/22397)).
+- Fixed a bug where Systemd-managed containers could be stuck in the Stopping state, unable to be restarted, if systemd killed the unit before `podman stop` finished stopping the container ([#19629](https://github.com/containers/podman/issues/19629)).
+- Fixed a bug where the remote Podman client's `podman farm build` command would not update manifests on the registry that were already pushed ([#22647](https://github.com/containers/podman/issues/22647)).
+- Fixed a bug where rootless Podman could fail to re-exec itself when run with a custom `argv[0]` that is not a valid command path, as might happen when used in `podmansh` ([#22672](https://github.com/containers/podman/issues/22672)).
+- Fixed a bug where `podman machine` connection URIs could be incorrect after an SSH port conflict, rendering machines inaccessible.
+- Fixed a bug where the `podman events` command would not print an error if incorrect values were passed to its `--since` and `--until` options.
+- Fixed a bug where an incorrect `host.containers.internal` entry could be added when running rootless containers using the `bridge` network mode ([#22653](https://github.com/containers/podman/issues/22653)).
+
+### API
+- A new Docker-compatible endpoint, Update, has been added for containers.
+- The Compat Create endpoint for Containers now supports setting container annotations.
+- The Libpod List endpoint for Images now includes additional information in its responses (image architecture, OS, and whether the image is a manifest list) ([#22184](https://github.com/containers/podman/issues/22184) and [#22185](https://github.com/containers/podman/issues/22185)).
+- The Build endpoint for Images no longer saves the build context as a temporary file, substantially improving performance and reducing required filesystem space on the server.
+- The Inspect API for Containers now returns results compatible with Podman v4.x when a request with version v4.0.0 is made. This allows Podman 4.X remote clients to work with a Podman 5.X server ([#22657](https://github.com/containers/podman/issues/22657)).
+- Fixed a bug where the Build endpoint for Images would not clean up temporary files created by the build if an error occurred.
+
+### Misc
+- Podman now detects unhandled system reboots and advises the user on proper mitigations.
+- Improved debugging output for `podman machine` on Darwin systems when `--log-level=debug` is used.
+- The Makefile now allows injecting extra build tags via the `EXTRA_BUILD_TAGS` environment variable.
+- Updated Buildah to v1.36.0
+- Updated the containers/common library to v0.59.0
+- Updated the containers/image library to v5.31.0
+- Updated the containers/storage library to v1.54.0
+
+## 5.0.3
+### Security
+- This release addresses CVE-2024-3727, a vulnerability in the containers/image library which allows attackers to trigger authenticated registry access on behalf of the victim user.
+
+### Bugfixes
+- Fixed a bug where `podman machine start` would fail if the machine had a volume with a long target path ([#22226](https://github.com/containers/podman/issues/22226)).
+- Fixed a bug where `podman machine start` mounted volumes with paths that included dashes in the wrong location ([#22505](https://github.com/containers/podman/issues/22505)).
+
+### Misc
+- Updated Buildah to v1.35.4
+- Updated the containers/common library to v0.58.3
+- Updated the containers/image library to v5.30.1
+
+## 5.0.2
+### Bugfixes
+- Fixed a bug that could leak IPAM entries when a network was removed ([#22034](https://github.com/containers/podman/issues/22034)).
+- Fixed a bug that could cause the rootless network namespace to not be cleaned up if an error occurred during setup, resulting in errors relating to a missing resolv.conf being displayed ([#22168](https://github.com/containers/podman/issues/22168)).
+- Fixed a bug where Podman would use rootless network namespace logic for nested containers ([#22218](https://github.com/containers/podman/issues/22218)).
+- Fixed a bug where writing to volumes on a Mac could result in EACCES failures when using the `:z` or `:Z` volume mount options on a directory with read-only files ([#19852](https://github.com/containers/podman/issues/19852)).
+
+### API
+- Fixed a bug in the Compat List endpoint for Networks which could result in a server crash due to concurrent writes to a map ([#22330](https://github.com/containers/podman/issues/22330)).
+
+## 5.0.1
+### Bugfixes
+- Fixed a bug where rootless containers using the Pasta network driver did not properly handle localhost DNS resolvers on the host, leading to DNS resolution issues ([#22044](https://github.com/containers/podman/issues/22044)).
+- Fixed a bug where Podman would warn that cgroups v1 systems were no longer supported on FreeBSD hosts.
+- Fixed a bug where HyperV `podman machine` VMs required an SSH client be installed on the system ([#22075](https://github.com/containers/podman/issues/22075)).
+- Fixed a bug that prevented the remote Podman client's `podman build` command from working properly when connecting from a rootless client to a rootful server ([#22109](https://github.com/containers/podman/issues/22109)).
+
+### Misc
+- The HyperV driver to `podman machine` now fails immediately if admin privileges are not available (previously, it would only fail when it reached operations that required admin privileges).
+
+## 5.0.0
+### Features
+- VMs created by `podman machine` can now use the native Apple hypervisor (`applehv`) when run on MacOS.
+- A new command has been added, `podman machine reset`, which will remove all existing `podman machine` VMs and relevant configurations.
+- The `podman manifest add` command now supports a new `--artifact` option to add OCI artifacts to a manifest list.
+- The `podman create`, `podman run`, and `podman push` commands now support the `--retry` and `--retry-delay` options to configure retries for pushing and pulling images.
+- The `podman run` and `podman exec` commands now support a new option, `--preserve-fd`, which allows passing a list of file descriptors into the container (as an alternative to `--preserve-fds`, which passes a specific number of file descriptors).
+- Quadlet now supports templated units ([#17744](https://github.com/containers/podman/discussions/17744)).
+- The `podman kube play` command can now create image-based volumes using the `volume.podman.io/image` annotation.
+- Containers created with `podman kube play` can now include volumes from other containers (similar to the `--volumes-from` option) using a new annotation, `io.podman.annotations.volumes-from` ([#16819](https://github.com/containers/podman/issues/16819)).
+- Pods created with `podman kube play` can now set user namespace options through the `io.podman.annotations.userns` annotation in the pod definition ([#20658](https://github.com/containers/podman/issues/20658)). +- Macvlan and ipvlan networks can adjust the name of the network interface created inside containers via the new `containers.conf` field `interface_name` ([#21313](https://github.com/containers/podman/issues/21313)). +- The `--gpus` option to `podman create` and `podman run` is now compatible with Nvidia GPUs ([#21156](https://github.com/containers/podman/issues/21156)). +- The `--mount` option to `podman create` and `podman run` supports a new mount option, `no-dereference`, to mount a symlink (instead of its dereferenced target) into a container ([#20098](https://github.com/containers/podman/issues/20098)). +- Podman now supports a new global option, `--config`, to point to a Docker configuration where we can source registry login credentials. +- The `podman ps --format` command now supports a new format specifier, `.Label` ([#20957](https://github.com/containers/podman/issues/20957)). +- The `uidmapping` and `gidmapping` options to the `podman run --userns=auto` option can now map to host IDs by prefixing host IDs with the `@` symbol. +- Quadlet now supports systemd-style drop-in directories. +- Quadlet now supports creating pods via new `.pod` unit files ([#17687](https://github.com/containers/podman/discussions/17687)). +- Quadlet now supports two new keys, `Entrypoint` and `StopTimeout`, in `.container` files ([#20585](https://github.com/containers/podman/issues/20585) and [#21134](https://github.com/containers/podman/issues/21134)). +- Quadlet now supports specifying the `Ulimit` key multiple times in `.container` files to set more than one ulimit on a container. +- Quadlet now supports setting the `Notify` key to `healthy` in `.container` files, to only sdnotify that a container has started when its health check begins passing ([#18189](https://github.com/containers/podman/issues/18189)). + +### Breaking Changes +- The backend for the `podman machine` commands has seen extensive rewrites. Configuration files have changed format and VMs from Podman 4.x and earlier are no longer usable. `podman machine` VMs must be recreated with Podman 5. +- The `podman machine init` command now pulls images as OCI artifacts, instead of using HTTP. As a result, a valid `policy.json` file is required on the host. Windows and Mac installers have been changed to install this file. +- QEMU is no longer a supported VM provider for `podman machine` on Mac. Instead, the native Apple hypervisor is supported. +- The `ConfigPath` and `Image` fields are no longer provided by the `podman machine inspect` command. Users can also no longer use `{{ .ConfigPath }}` or `{{ .Image }}` as arguments to `podman machine inspect --format`. +- The output of `podman inspect` for containers has seen a number of breaking changes to improve Docker compatibility, including changing `Entrypoint` from a string to an array of strings and StopSignal from an int to a string. +- The `podman inspect` command for containers now returns nil for healthchecks when inspecting containers without healthchecks. +- The `podman pod inspect` command now outputs a JSON array regardless of the number of pods inspected (previously, inspecting a single pod would omit the array). +- It is no longer possible to create new BoltDB databases; attempting to do so will result in an error. 
All new Podman installations will now use the SQLite database backend. Existing BoltDB databases remain usable.
+- Support for CNI networking has been gated by a build tag and will not be enabled by default.
+- Podman will now print warnings when used on cgroups v1 systems. Support for cgroups v1 is deprecated and will be removed in a future release. The `PODMAN_IGNORE_CGROUPSV1_WARNING` environment variable can be set to suppress warnings.
+- Network statistics sent over the Docker API are now per-interface, and not aggregated, improving Docker compatibility.
+- The default tool for rootless networking has been swapped from `slirp4netns` to `pasta` for improved performance. As a result, networks named `pasta` are no longer supported.
+- The `--image` option replaces the now deprecated `--image-path` option for `podman machine init`.
+- The output of `podman events --format "{{json .}}"` has been changed to improve Docker compatibility, including the `time` and `timeNano` fields ([#14993](https://github.com/containers/podman/issues/14993)).
+- The name of `podman machine` VMs and the username used within the VM are now validated and must match this regex: `[a-zA-Z0-9][a-zA-Z0-9_.-]*`.
+- Using multiple filters with the List Images REST API now combines the filters with AND instead of OR, improving Docker compatibility ([#18412](https://github.com/containers/podman/issues/18412)).
+- The parsing for a number of Podman CLI options which accept arrays has been changed to no longer accept string-delineated lists, and instead to require the option to be passed multiple times. These options are `--annotation` to `podman manifest annotate` and `podman manifest add`, the `--configmap`, `--log-opt`, and `--annotation` options to `podman kube play`, the `--pubkeysfile` option to `podman image trust set`, the `--encryption-key` and `--decryption-key` options to `podman create`, `podman run`, `podman push` and `podman pull`, the `--env-file` option to `podman exec`, the `--blkio-weight-device`, `--device-read-bps`, `--device-write-bps`, `--device-read-iops`, `--device-write-iops`, `--device`, `--label-file`, `--chrootdirs`, `--log-opt`, and `--env-file` options to `podman create` and `podman run`, and the `--hooks-dir` and `--module` global options.
+
+### Changes
+- The `podman system reset` command no longer waits for running containers to gracefully stop, and instead immediately sends SIGKILL ([#21874](https://github.com/containers/podman/issues/21874)).
+- The `podman network inspect` command now includes running containers using the network in its output ([#14126](https://github.com/containers/podman/issues/14126)).
+- The `podman compose` command is now supported on non-AMD64/ARM64 architectures.
+- VMs created by `podman machine` will now pass HTTP proxy environment variables into the VM for all providers.
+- The `--no-trunc` option to the `podman kube play` and `podman kube generate` commands has been deprecated. Podman now complies with the Kubernetes specification for annotation size, removing the need for this option.
+- The `DOCKER_HOST` environment variable will be set by default for rootless users when podman-docker is installed.
+- Connections from `podman system connection` and farms from `podman farm` are now written to a new configuration file called `podman-connections.conf`. As a result, Podman no longer writes to `containers.conf`. Existing connections from `containers.conf` will still be respected.
+- Most `podman farm` subcommands (save for `podman farm build`) no longer need to connect to the machines in the farm to run.
+- The `podman create` and `podman run` commands no longer require specifying an entrypoint on the command line when the container image does not define one. In this case, an empty command will be passed to the OCI runtime, and the resulting behavior is runtime-specific.
+- The default SELinux label for content mounted from the host in `podman machine` VMs on Mac is now `system_u:object_r:nfs_t:s0` so that it can be shared with all containers without issue.
+- Newly-created VMs created by `podman machine` will now share a single SSH key for access. As a result, `podman machine rm --save-keys` is deprecated as the key will persist by default.
+
+### Bugfixes
+- Fixed a bug where the `podman stats` command would not show network statistics when the `pasta` network mode was used.
+- Fixed a bug where `podman machine` VMs using the HyperV provider could not mount shares on directories that did not yet exist.
+- Fixed a bug where the `podman compose` command did not respect the `--connection` and `--url` options.
+- Fixed a bug where the `podman stop -t -1` command would wait for 0 seconds, not infinite seconds, before sending SIGKILL ([#21811](https://github.com/containers/podman/issues/21811)).
+- Fixed a bug where Podman could deadlock when cleaning up a container when the `slirp4netns` network mode was used with a restart policy of `always` or `unless-stopped` or `on-failure` and a user namespace ([#21477](https://github.com/containers/podman/issues/21477)).
+- Fixed a bug where uninstalling Podman on Mac did not remove the `docker.sock` symlink ([#20650](https://github.com/containers/podman/issues/20650)).
+- Fixed a bug where preexisting volumes being mounted into a new container using a path that exists in said container would not be properly chowned ([#21608](https://github.com/containers/podman/issues/21608)).
+- Fixed a bug where the `podman image scp` command could fail if there was not sufficient space in the destination machine's `/tmp` for the image ([#21239](https://github.com/containers/podman/issues/21239)).
+- Fixed a bug where containers killed by running out of memory (including due to a memory limit) were not properly marked as OOM killed in `podman inspect` ([#13102](https://github.com/containers/podman/issues/13102)).
+- Fixed a bug where `podman kube play` did not create memory-backed emptyDir volumes using a tmpfs filesystem.
+- Fixed a bug where containers started with `--rm` were sometimes not removed after a reboot ([#21482](https://github.com/containers/podman/issues/21482)).
+- Fixed a bug where the `podman events` command using the remote Podman client did not display the network name associated with network events ([#21311](https://github.com/containers/podman/issues/21311)).
+- Fixed a bug where the `podman farm build` command did not properly handle the `--tls-verify` option and would override server defaults even if the option was not set by the user ([#21352](https://github.com/containers/podman/issues/21352)).
+- Fixed a bug where the `podman inspect` command could segfault on FreeBSD ([#21117](https://github.com/containers/podman/issues/21117)).
+- Fixed a bug where Quadlet did not properly handle comment lines ending with a backslash ([#21555](https://github.com/containers/podman/issues/21555)).
+- Fixed a bug where Quadlet would sometimes not report errors when malformed quadlet files were present.
+- Fixed a bug where Quadlet could hang when given a `.container` file with certain types of trailing whitespace ([#21109](https://github.com/containers/podman/issues/21109)). +- Fixed a bug where Quadlet could panic when generating from Kubernetes YAML containing the `bind-mount-options` key ([#21080](https://github.com/containers/podman/issues/21080)). +- Fixed a bug where Quadlet did not properly strip quoting from values in `.container` files ([#20992](https://github.com/containers/podman/issues/20992)). +- Fixed a bug where the `--publish-all` option to `podman kube play` did not function when used with the remote Podman client. +- Fixed a bug where the `podman kube play --build` command could not build images whose Dockerfile specified an image from a private registry with a self-signed certificate in a `FROM` directive ([#20890](https://github.com/containers/podman/discussions/20890)). +- Fixed a bug where container remove events did not have the correct exit code set ([#19124](https://github.com/containers/podman/issues/19124)). + +### API +- A new API endpoint, `/libpod/images/$name/resolve`, has been added to resolve a (potential) short name to a list of fully-qualified image references Podman which could be used to pull the image. +- Fixed a bug where the List API for Images did not properly handle filters and would discard all but the last listed filter. +- Fixed a bug in the Docker Create API for Containers where entries from `/etc/hosts` were copied into create containers, resulting in incompatibility with network aliases. +- The API bindings have been refactored to reduce code size, leading to smaller binaries ([#17167](https://github.com/containers/podman/issues/17167)). + +### Misc +- Failed image pulls will now generate an event including the error. +- Updated Buildah to v1.35.0 +- Updated the containers/image library to v5.30.0 +- Updated the containers/storage library to v1.53.0 +- Updated the containers/common library to v0.58.0 +- Updated the libhvee library to v0.7.0 + +## 4.9.3 +### Features +- The `podman container commit` command now features a `--config` option which accepts a filename containing a JSON-encoded container configuration to be merged in to the newly-created image. + ## 4.9.2 ### Security - This release addresses a number of Buildkit vulnerabilities including but not limited to: [CVE-2024-23651](https://github.com/advisories/GHSA-m3r6-h7wv-7xxv), [CVE-2024-23652](https://github.com/advisories/GHSA-4v98-7qmw-rqr8), and [CVE-2024-23653](https://github.com/advisories/GHSA-wr6v-9f75-vh2g). @@ -1054,7 +1246,7 @@ - The `podman rmi` command now supports a new option, `--ignore`, which will ignore errors caused by missing images. - The `podman network create` command now features a new option, `--ipam-driver`, to specify details about how IP addresses are assigned to containers in the network ([#13521](https://github.com/containers/podman/issues/13521)). - The `podman machine list` command now features a new option, `--quiet`, to print only the names of configured VMs and no other information. -- The `--ipc` option to the `podman create`, `podman run`, and `podman pod create` commands now supports three new modes: `none`, `private`, and `shareable`. The default IPC mode is now `shareable`, indicating the the IPC namespace can be shared with other containers ([#13265](https://github.com/containers/podman/issues/13265)). 
+- The `--ipc` option to the `podman create`, `podman run`, and `podman pod create` commands now supports three new modes: `none`, `private`, and `shareable`. The default IPC mode is now `shareable`, indicating the IPC namespace can be shared with other containers ([#13265](https://github.com/containers/podman/issues/13265)). - The `--mount` option to the `podman create` and `podman run` commands can now set options for created named volumes via the `volume-opt` parameter ([#13387](https://github.com/containers/podman/issues/13387)). - The `--mount` option to the `podman create` and `podman run` commands now allows parameters to be passed in CSV format ([#13922](https://github.com/containers/podman/issues/13922)). - The `--userns` option to the `podman create` and `podman run` commands now supports a new option, `nomap`, that (only for rootless containers) does not map the UID of the user that started the container into the container, increasing security. @@ -2038,7 +2230,7 @@ - The `podman generate kube` command now properly supports generating YAML for containers and pods creating using host networking (`--net=host`) ([#9077](https://github.com/containers/podman/issues/9077)). - The `podman kill` command now supports a `--cidfile` option to kill containers given a file containing the container's ID ([#8443](https://github.com/containers/podman/issues/8443)). - The `podman pod create` command now supports the `--net=none` option ([#9165](https://github.com/containers/podman/issues/9165)). -- The `podman volume create` command can now specify volume UID and GID as options with the `UID` and `GID` fields passed to the the `--opt` option. +- The `podman volume create` command can now specify volume UID and GID as options with the `UID` and `GID` fields passed to the `--opt` option. - Initial support has been added for Docker Volume Plugins. Podman can now define available plugins in `containers.conf` and use them to create volumes with `podman volume create --driver`. - The `podman run` and `podman create` commands now support a new option, `--platform`, to specify the platform of the image to be used when creating the container. - The `--security-opt` option to `podman run` and `podman create` now supports the `systempaths=unconfined` option to unrestrict access to all paths in the container, as well as `mask` and `unmask` options to allow more granular restriction of container paths. 
@@ -2991,7 +3183,7 @@ - The `podman rm` command can now remove containers in broken states which previously could not be removed - The `podman info` command, when run without root, now shows information on UID and GID mappings in the rootless user namespace - Added `podman build --squash-all` flag, which squashes all layers (including those of the base image) into one layer -- The `--systemd` flag to `podman run` and `podman create` now accepts a string argument and allows a new value, `always`, which forces systemd support without checking if the the container entrypoint is systemd +- The `--systemd` flag to `podman run` and `podman create` now accepts a string argument and allows a new value, `always`, which forces systemd support without checking if the container entrypoint is systemd ### Bugfixes - Fixed a bug where the `podman top` command did not work on systems using CGroups V2 ([#4192](https://github.com/containers/podman/issues/4192)) diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md index a4226ec29f..ac12f84cec 100644 --- a/RELEASE_PROCESS.md +++ b/RELEASE_PROCESS.md @@ -314,6 +314,29 @@ spelled with complete minutiae. -down. Click the drop-down and specify the version number in the dialog that appears. +1. Update Cirrus-CI cron job list + 1. After any Major or significant minor (esp. `-rhel`) releases, it's critical to + maintain the Cirrus-CI cron job list. This applies to all containers-org repos, + not just podman. + 1. Access the repo. settings WebUI by navigating to + `https://cirrus-ci.com/github/containers/` + and clicking the gear-icon in the upper-right. + 1. For minor (i.e. **NOT** `-rhel`) releases, (e.x. `vX.Y`), the previous release + should be removed from rotation (e.x. `vX.`) assuming it's no longer supported. + Simply click the trash-can icon to the right of the job definition. + 1. For `-rhel` releases, these are tied to products with specific EOL dates. They should + *never* be disabled unless you (and a buddy) are *absolutely* certain the product is EOL + and will *never* ever see another backport (CVE or otherwise). + 1. On the settings page, pick a "less used" time-slot based on the currently defined + jobs. For example, if three jobs specify `12 12 12 ? * 1-6`, choose another. Any + spec. `H`/`M`/`S` value between 12 and 22 is acceptable (e.x. `22 22 22 ? * 1-6`). + The point is to not overload the clouds with CI jobs. + 1. Following the pattern of the already defined jobs, at the bottom of the settings + page add a new entry. The "Name" should reflect the version number, the "Branch" + is simply the newly created release branch name (must be exact), and the "Expression" + is the time slot you selected (copy-paste). + 1. Click the "+" button next to the new-job row you just filled out. + 1. Announce the release 1. For major and minor releases, write a blog post and publish it to blogs.podman.io Highlight key features and important changes or fixes. Link to the GitHub release. diff --git a/build_windows.md b/build_windows.md index 3a95b45b96..5a93faca7b 100644 --- a/build_windows.md +++ b/build_windows.md @@ -1,107 +1,474 @@ # Building the Podman client and client installer on Windows -The following describes the process for building the Podman client on Windows. +The following describes the process for building and testing the Podman Windows +client (`podman.exe`) and the Podman Windows installer (`podman-setup.exe`) on +Windows. 
+
+## Topics
+
+- [Requirements](#requirements)
+ - [OS requirements](#os-requirements)
+ - [Git and go](#git-and-go)
+ - [Pandoc](#pandoc)
+ - [WiX Toolset v3](#wix-toolset-v3)
+ - [Virtualization Provider](#virtualization-provider)
+ - [WSL](#wsl)
+ - [Hyper-V](#hyper-v)
+- [Get the source code](#get-the-source-code)
+ - [Allow local PowerShell scripts execution](#allow-local-powershell-scripts-execution)
+- [Build and test the Podman client for Windows](#build-and-test-the-podman-client-for-windows)
+ - [Build the Podman client](#build-the-podman-client)
+ - [Download gvproxy.exe and win-sshproxy.exe](#download-gvproxyexe-and-win-sshproxyexe)
+ - [Create a configuration file (optional)](#create-a-configuration-file-optional)
+ - [Create and start a podman machine](#create-and-start-a-podman-machine)
+ - [Run a container using podman](#run-a-container-using-podman)
+- [Build and test the Podman Windows installer](#build-and-test-the-podman-windows-installer)
+ - [Build the installer](#build-the-installer)
+ - [Test the installer](#test-the-installer)
+ - [Build and test the standalone `podman.msi` file](#build-and-test-the-standalone-podmanmsi-file)
+ - [Verify the installation](#verify-the-installation)
+ - [Uninstall and clean-up](#uninstall-and-clean-up)
+- [Validate changes before submitting a PR](#validate-changes-before-submitting-a-pr)
+ - [winmake lint](#winmake-lint)
+ - [winmake validatepr](#winmake-validatepr)
+
+## Requirements
+
+### OS requirements
+
+This documentation assumes one uses a Windows 10 or 11 development machine and a
+PowerShell terminal.
+
+### Git and go
+
+To build Podman, the [git](https://gitforwindows.org/) and [go](https://go.dev)
+tools are required. In case they are not yet installed, open a Windows
+PowerShell terminal and run the following command (it assumes that
+[winget](https://learn.microsoft.com/en-us/windows/package-manager/winget/) is
+installed):
+
+```pwsh
+winget install -e GoLang.Go Git.Git
+```
+
+:information_source: A terminal restart is advised for the `PATH` to be
+reloaded. Alternatively, the `PATH` can be updated manually:
-## OS requirements
+```pwsh
+$env:Path += ";C:\Program Files\Go\bin\;C:\Program Files\Git\cmd\"
+```
-Windows OS can behave very differently depending on how it was configured. This documentation assumes that one is using
-a [Windows 11 development machine](https://developer.microsoft.com/en-us/windows/downloads/virtual-machines/) or a
-configuration close to this one. The Podman Windows client installer bundles several tools, which are unnecessary for Podman builds, but this
-set of packages is well aligned with GitHub's `windows-latest` offerings. Some of the tools will still be missing from
-this distribution and will have to be manually added after this installation completes.
+### Pandoc
-## Install Pandoc
+[Pandoc](https://pandoc.org/) is used to generate Podman documentation. It is
+required for building the documentation and the
+[bundle installer](#build-the-installer). It can be avoided when building and
+testing the
+[Podman client for Windows](#build-and-test-the-podman-client-for-windows) or
+[the standalone `podman.msi` installer](#build-and-test-the-standalone-podmanmsi-file).
+Pandoc can be installed from https://pandoc.org/installing.html. When performing
+the Pandoc installation, one has to choose the option "Install for all users"
+(to put the binaries into "Program Files" directory).
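As an optional sanity check (not part of the upstream instructions), you can confirm from a fresh PowerShell session that Pandoc ended up on the `PATH` before relying on it for documentation builds:

```pwsh
# Both commands should succeed if Pandoc was installed for all users
Get-Command pandoc
pandoc --version
```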
-Pandoc could be installed from https://pandoc.org/installing.html When performing the Pandoc installation one, has to choose the option -"Install for all users" (to put the binaries into "Program Files" directory). +### WiX Toolset v3 -## Install WiX Toolset v3 (is preinstalled in GitHub runner) -The latest release of the WiX Toolset can be obtained from https://wixtoolset.org/docs/wix3/. Installing it into a clean VM might require +[WiX Toolset](https://wixtoolset.org) **v3** is used to develop and build the +Podman Windows installer. It's not required for the Podman Windows client. +Version 3 of the WiX Toolset can be obtained from +https://wixtoolset.org/docs/wix3/. Installing it into a clean VM might require an additional installation of .NET Framework 3.5 in advance ([instructions for adding .NET Framework 3.5 via enabling the Windows feature](https://learn.microsoft.com/en-us/dotnet/framework/install/dotnet-35-windows#enable-the-net-framework-35-in-control-panel)) -## Install msys2 +### Virtualization Provider -Podman requires brew -- a collection of Unix like build tools and libraries adapted for Windows. More details and -installation instructions are available from their [home page](https://www.msys2.org/). There are also premade GitHub -actions for this tool that are available. +Running Podman on Windows requires a virtualization provider. The supported +providers are the +[Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/) +and +[Hyper-V](https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v). +At least one of those two is required to test podman on a local Windows machine. -## Install build dependencies +#### WSL -Podman requires some software from msys2 to be able to build. This can be done using msys2 shell. One can start it -from the Start menu. This documentation covers only usage of MSYS2 UCRT64 shell (msys2 shells come preconfigured for -different [environments](https://www.msys2.org/docs/environments/)). +WSL can be installed on Windows 10 and Windows 11, including Windows Home, with +the following command, from a PowerShell or Windows Command Prompt terminal in +**administrator mode**: +```pwsh +wsl --install ``` -$ pacman -S git make zip mingw-w64-ucrt-x86_64-gcc mingw-w64-ucrt-x86_64-go mingw-w64-ucrt-x86_64-python + +For more information refer to +[the official documentation](https://learn.microsoft.com/en-us/windows/wsl/). + +#### Hyper-V + +Hyper-V is an optional feature of Windows Enterprise, Pro, or Education (not +Home). It is available on Windows 10 and 11 only and +[has some particular requirements in terms of CPU and memory](https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v#check-requirements). +To enable it on a supported system, enter the following command: + +```pwsh +Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All ``` -The Pandoc tool installed in a prior step is specific, that is the installer doesn't add the tool to any PATH environment -variable known to msys2, so, it has to be linked explicitly to work. +After running this command, a restart of the Windows machine is required. +:information_source: Configure the VM provider used by podman (Hyper-V or WSL) +in the file `%PROGRAMDATA%/containers/containers.conf`. +[More on that later](#create-a-configuration-file-optional). 
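If it is unclear which provider is already usable on a given machine, a quick check like the following can help (a sketch using standard Windows tooling rather than commands from this repository; the Hyper-V query needs an elevated terminal):

```pwsh
# Reports the installed WSL version and default distribution, if any
wsl --status
# Shows whether the Hyper-V optional feature is enabled (administrator rights required)
Get-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V
```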
+
+## Get the source code
+
+Open a Windows Terminal and run the following command:
+
+```pwsh
+git config --global core.autocrlf false
```
-$ mkdir -p /usr/local/bin
-$ ln -sf "/c/Program Files/Pandoc/pandoc.exe" "/usr/local/bin/pandoc.exe"
+
+It configures git so that it does **not** automatically convert LF to CRLF. In
+the Podman git repository, files are expected to use Unix LF rather than Windows
+CRLF.
+
+Then run the command to clone the Podman git repository:
+
+```pwsh
+git clone https://github.com/containers/podman
```
-## Restart shell (important)
+It creates the folder `podman` in the current directory and clones the Podman
+git repository into it.
+
+### Allow local PowerShell scripts execution
+
+A developer can build the Podman client for Windows and the Windows installer
+with the PowerShell script
+[winmake.ps1](https://github.com/containers/podman/blob/main/winmake.ps1).
+
+Windows sets the ExecutionPolicy to `Restricted` by default; running scripts is
+prohibited. Determine the ExecutionPolicy on the machine with this command:
+
+```pwsh
+Get-ExecutionPolicy
+```
+
+If the command returns `Restricted`, the ExecutionPolicy should be changed to
+`RemoteSigned`:
+
+```pwsh
+Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+```
+
+This policy allows the execution of local PowerShell scripts, such as
+`winmake.ps1`, for the current user.
+
+## Build and test the Podman client for Windows
+
+The following steps describe how to build the `podman.exe` binary from sources
+and test it.
+
+### Build the Podman client
-One needs to restart the [msys2](https://www.msys2.org/) shell after dependency installation before proceeding with the build.
+Open a PowerShell terminal and move to the Podman local git repository directory:
-## Obtain Podman source code
+```pwsh
+Set-Location .\podman
```
-One can obtain the latest source code for Podman from its [GitHub](https://github.com/containers/podman) repository.
+Build `podman.exe`:
```
-$ git clone https://github.com/containers/podman.git go/src/github.com/containers/podman
```
-## Build client
+.\winmake.ps1 podman-remote
```
+:information_source: Verify the build's success by checking the content of the
+`.\bin\windows` folder. Upon successful completion, the executable `podman.exe`
+should be there:
+
+```pwsh
+Get-ChildItem .\bin\windows\
+
+
+ Directory: C:\Users\mario\Git\podman\bin\windows
-After completing the preparatory steps of obtaining the Podman source code and installing its dependencies, the client
-can now be built.
+Mode LastWriteTime Length Name
+---- ------------- ------ ----
+-a---- 2/27/2024 11:59 AM 45408256 podman.exe
```
-$ cd go/src/github.com/containers/podman
-$ make clean podman-remote-release-windows_amd64.zip
+
+### Download gvproxy.exe and win-sshproxy.exe
+
+[gvisor-tap-vsock](https://github.com/containers/gvisor-tap-vsock/) binaries
+(`gvproxy-windowsgui.exe` and `win-sshproxy.exe`) are required to run the Podman
+client on Windows. The executables are expected to be in the same folder as
+`podman.exe`. The following command downloads the latest version into the
+`.\bin\windows\` folder:
+
+```pwsh
+.\winmake.ps1 win-gvproxy
```
-The complete distribution will be packaged to the `podman-remote-release-windows_amd64.zip` file. It is possible to
-unzip it and replace files in the default Podman installation with the built ones to use the custom build.
+:information_source: To verify that the binaries have been downloaded
+successfully, check the content of the `.\bin\windows\` folder.
+
+```pwsh
+Get-ChildItem .\bin\windows\
+
+
+ Directory: C:\Users\mario\Git\podman\bin\windows
+
+
+Mode LastWriteTime Length Name
+---- ------------- ------ ----
+-a---- 2/29/2024 12:10 PM 10946048 gvproxy.exe
+-a---- 2/27/2024 11:59 AM 45408256 podman.exe
+-a---- 2/29/2024 12:10 PM 4089856 win-sshproxy.exe
+```
-### Build client only (for faster feedback loop)
+### Create a configuration file (optional)
-Building Podman by following this documentation can take a fair amount of time and effort. Packaging the installer adds even more overhead. If
-the only needed artifact is the Podman binary itself, it is possible to build only it with this command:
+To test some particular configurations of Podman, create a `containers.conf`
+file:
```
-$ make podman-remote
+New-Item -ItemType Directory $env:PROGRAMDATA\containers\
+New-Item -ItemType File $env:PROGRAMDATA\containers\containers.conf
+notepad $env:PROGRAMDATA\containers\containers.conf
```
-The binary will be located in `bin/windows/`. It could be used as drop in replacement for the installed version of
-Podman.
+For example, to test with Hyper-V as the virtualization provider, use the
+following content:
-It is also possible to cross-build for other platforms by providing GOOS and GOARCH environment variables.
+```toml
+[machine]
+provider="hyperv"
+```
+
+Find the complete list of configuration options in the
+[documentation](https://github.com/containers/common/blob/main/docs/containers.conf.5.md).
-## Build client installer
+### Create and start a podman machine
-As Windows requires more effort in comparison to Unix systems for installation procedures, it is sometimes
-easier to pack the changes into a ready-to-use installer. To create the installer, the full client distribution in ZIP
-format has to be built beforehand.
+Execute the following commands in a terminal to create a Podman machine:
+```pwsh
+.\bin\windows\podman.exe machine init
```
-$ export BUILD_PODMAN_VERSION=$(test/version/version | sed 's/-.*//')
-$ mkdir -p contrib/win-installer/current
-$ cp podman-remote-release-windows_amd64.zip contrib/win-installer/current/
-$ cd contrib/win-installer
-$ powershell -ExecutionPolicy Bypass -File build.ps1 $BUILD_PODMAN_VERSION dev current
+
+When `machine init` completes, run `machine start`:
+
+```pwsh
+.\bin\windows\podman.exe machine start
```
-The installer will be located in the `contrib/win-installer` folder (relative to checkout root) and will have a name
-like `podman-4.5.0-dev-setup.exe`. This could be installed in a similar manner as the official Podman for Windows installers
-(when installing unsigned binaries is allowed on the host).
+:information_source: If the virtualization provider is Hyper-V, execute the
+above commands in an administrator terminal.
+
+### Run a container using podman
+
+Use the locally built Podman client for Windows to run containers:
-## Using the client
+```pwsh
+.\bin\windows\podman.exe run hello-world
+```
 To learn how to use the Podman client, refer to its
 [tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/remote_client.md).
+
+## Build and test the Podman Windows installer
+
+The Podman Windows installer (e.g., `podman-5.1.0-dev-setup.exe`) is a bundle
+that includes an msi package (`podman.msi`) and installs the WSL kernel
+(`podman-wslkerninst.exe`). It's built using the
+[WiX Toolset](https://wixtoolset.org/) and the
+[PanelSwWixExtension](https://github.com/nirbar/PanelSwWixExtension/tree/wix3-v3.11.1.353)
+WiX extension.
The source code is in the folder `contrib\win-installer`.
+
+### Build the Windows installer
+
+To build the installation bundle, run the following command:
+
+```pwsh
+.\winmake.ps1 installer
+```
+
+:information_source: Building `podman-remote`, `win-gvproxy`, and `docs` is
+required before running this command.
+
+Locate the installer in the `contrib\win-installer` folder (relative to checkout
+root) with a name like `podman-5.2.0-dev-setup.exe`.
+
+The `installer` target of `winmake.ps1` runs the script
+`contrib\win-installer\build.ps1` that, in turn, executes:
+
+- `build-hooks.bat`: builds `podman-wslkerninst.exe` (WSL kernel installer) and
+ `podman-msihooks.dll` (helper that checks if WSL and Hyper-V are installed).
+- `build-msi.bat`: builds `podman.msi` from the WiX source files `podman.wxs`,
+ `pages.wxs`, `podman-ui.wxs` and `welcome-install-dlg.wxs`.
+- `build-burn.bat`: builds the `podman-setup.exe` file from
+ [WiX Burn bundle](https://wixtoolset.org/docs/tools/burn/) `burn.wxs`.
+
+### Test the Windows installer
+
+Double-click on the Windows installer to run it. To get the installation logs
+with debug information, running it via the command line is recommended:
+
+```pwsh
+contrib\win-installer\podman-5.1.0-dev-setup.exe /install /log podman-setup.log
+```
+
+It generates the files `podman-setup.log` and `podman-setup_000_Setup.log` in
+the current directory; they include detailed installation information.
+
+Run it in `quiet` mode to automate the installation and avoid interacting with
+the GUI. Open the terminal **as an administrator**, add the `/quiet` option, and
+set the bundle variables `MachineProvider` (`wsl` or `hyperv`), `WSLCheckbox`
+(`1` to install WSL as part of the installation, `0` otherwise), and
+`HyperVCheckbox` (`1` to install Hyper-V as part of the installation, `0`
+otherwise):
+
+```pwsh
+contrib\win-installer\podman-5.1.0-dev-setup.exe /install /log podman-setup.log /quiet MachineProvider=wsl WSLCheckbox=0 HyperVCheckbox=0
+```
+
+### Build and test the standalone `podman.msi` file
+
+Building and testing the standalone `podman.msi` package can be useful during
+development. Although this package is not published as a standalone file when
+Podman is released (it's included in the `podman-setup.exe` bundle), it is
+faster to build and test than the full bundle.
+
+Run the script `contrib\win-installer\build-msi.bat` to build the standalone
+`podman.msi` file:
+
+```pwsh
+Push-Location .\contrib\win-installer\
+.\build-msi.bat 9.9.9
+Pop-Location
+```
+
+It creates the file `.\contrib\win-installer\podman.msi`.
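Before testing the package, an optional quick check (not required by the build scripts) confirms it was actually produced:

```pwsh
# Confirm the MSI exists and show its size and timestamp
Get-Item .\contrib\win-installer\podman.msi
```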
Test it using the
+[Microsoft Standard Installer](https://learn.microsoft.com/en-us/windows/win32/msi/standard-installer-command-line-options)
+command line tool:
+
+```pwsh
+msiexec /package contrib\win-installer\podman.msi /l*v podman-msi.log
+```
+
+To run it in quiet, non-interactive mode, open the terminal **as an
+administrator**, add the `/quiet` option, and set the MSI properties
+`MACHINE_PROVIDER` (`wsl` or `hyperv`), `WITH_WSL` (`1` to install WSL as part
+of the installation, `0` otherwise) and `WITH_HYPERV` (`1` to install Hyper-V as
+part of the installation, `0` otherwise):
+
+```pwsh
+msiexec /package contrib\win-installer\podman.msi /l*v podman-msi.log /quiet MACHINE_PROVIDER=wsl WITH_WSL=0 WITH_HYPERV=0
+```
+
+:information_source: `podman.msi` GUI dialogs, defined in the file
+`contrib\win-installer\podman-ui.wxs`, are distinct from the installation bundle
+`podman-setup.exe` GUI dialogs, defined in
+`contrib\win-installer\welcome-install-dlg.wxs`.
+
+### Verify the installation
+
+Inspect the msi installation log `podman-msi.log` (or
+`podman-setup_000_Setup.log` if testing with the bundle) to verify that the
+installation was successful:
+
+```pwsh
+Select-String -Path "podman-msi.log" -Pattern "Installation success or error status: 0"
+```
+
+The following commands are also helpful for checking the installation:
+
+```pwsh
+# Check the copy of the podman client in the Podman folder
+Test-Path -Path "$ENV:PROGRAMFILES\RedHat\Podman\podman.exe"
+# Check the generation of the podman configuration file
+Test-Path -Path "$ENV:PROGRAMDATA\containers\containers.conf.d\99-podman-machine-provider.conf"
+# Check that the installer configured the right provider
+Get-Content "$ENV:PROGRAMDATA\containers\containers.conf.d\99-podman-machine-provider.conf" | Select -Skip 1 | ConvertFrom-StringData | % { $_.provider }
+# Check the creation of the registry key
+Test-Path -Path "HKLM:\SOFTWARE\Red Hat\Podman"
+Get-ItemProperty "HKLM:\SOFTWARE\Red Hat\Podman" InstallDir
+# Check that podman.exe is in the $PATH
+$env:PATH | Select-String -SimpleMatch "$ENV:PROGRAMFILES\RedHat\Podman"
+```
+
+:information_source: Podman CI uses the script
+`contrib\cirrus\win-installer-main.ps1`. Use it locally, too, to build and test
+the installer.
+
+### Uninstall and clean-up
+
+Podman can be uninstalled from the Windows Control Panel or by running the
+following command from a terminal **as an administrator**:
+
+```pwsh
+contrib\win-installer\podman-5.1.0-dev-setup.exe /uninstall /quiet /log podman-setup-uninstall.log
+```
+
+The uninstaller does not delete some folders.
Clean them up manually:
+
+```pwsh
+$extraFolders = @(
+ "$ENV:PROGRAMDATA\containers\"
+ "$ENV:LOCALAPPDATA\containers\"
+ "$env:USERPROFILE\.config\containers\"
+ "$env:USERPROFILE\.local\share\containers\"
+ )
+$extraFolders | ForEach-Object {Remove-Item -Recurse -Force $PSItem}
+```
+
+The following commands are helpful to verify that the uninstallation was
+successful:
+
+```pwsh
+# Inspect the uninstallation log for a success message
+Select-String -Path "podman-setup-uninstall_000_Setup.log" -Pattern "Removal success or error status: 0"
+# Check that the uninstaller removed Podman resources
+$foldersToCheck = @(
+ "$ENV:PROGRAMFILES\RedHat\Podman\podman.exe"
+ "HKLM:\SOFTWARE\Red Hat\Podman"
+ "$ENV:PROGRAMDATA\containers\"
+ "$env:USERPROFILE\.config\containers\"
+ "$env:USERPROFILE\.local\share\containers\"
+ "$ENV:LOCALAPPDATA\containers\"
+ "$ENV:APPDATA\containers\containers.conf.d\99-podman-machine-provider.conf"
+)
+$foldersToCheck | ForEach-Object {Test-Path -Path $PSItem}
+```
+
+## Validate changes before submitting a PR
+
+The script `winmake.ps1` has a couple of targets to check the source code
+statically. GitHub pull request checks run the same static analysis. It is
+highly recommended that you run them locally before submitting a PR.
+
+### winmake lint
+
+The `lint` target provides fast validation. It runs the following
+tools:
+
+- `golangci-lint`: runs go-specific linters configured in
+ [`.golangci.yml`](.golangci.yml)
+- `pre-commit`: runs more linters configured in
+ [`.pre-commit-config.yaml`](.pre-commit-config.yaml)
+
+:information_source: Install [golangci-lint](https://golangci-lint.run) and
+[pre-commit](https://pre-commit.com) to run `winmake.ps1 lint`.
+
+### winmake validatepr
+
+The `validatepr` target performs a more exhaustive validation but takes
+significantly more time to complete. It uses `podman` to run the target
+`.validatepr` of the [Linux `Makefile`](Makefile). It builds Podman for Linux,
+macOS and Windows and then performs the same checks as the `lint` target plus
+many more.
+
+:information_source: Create and start a Podman machine before running
+`winmake.ps1 validatepr`. Configure the Podman machine with at least 4GB of memory:
+`podman machine init -m 4096`.
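Putting the validation targets together, a typical pre-PR run might look like this (a sketch assembled from the targets and the machine-size note above):

```pwsh
# Fast checks first
.\winmake.ps1 lint
# Full validation needs a running Podman machine with enough memory
.\bin\windows\podman.exe machine init -m 4096
.\bin\windows\podman.exe machine start
.\winmake.ps1 validatepr
```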
diff --git a/cmd/podman-mac-helper/install.go b/cmd/podman-mac-helper/install.go index 4aec4d95ac..62e9121202 100644 --- a/cmd/podman-mac-helper/install.go +++ b/cmd/podman-mac-helper/install.go @@ -14,12 +14,13 @@ import ( "syscall" "text/template" + "github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" ) const ( - rwx_rx_rx = 0755 - rw_r_r = 0644 + mode755 = 0755 + mode644 = 0644 ) const launchConfig = ` @@ -91,7 +92,7 @@ func install(cmd *cobra.Command, args []string) error { labelName := fmt.Sprintf("com.github.containers.podman.helper-%s.plist", userName) fileName := filepath.Join("/Library", "LaunchDaemons", labelName) - if _, err := os.Stat(fileName); err == nil || !os.IsNotExist(err) { + if err := fileutils.Exists(fileName); err == nil || !errors.Is(err, fs.ErrNotExist) { fmt.Fprintln(os.Stderr, "helper is already installed, skipping the install, uninstall first if you want to reinstall") return nil } @@ -109,7 +110,7 @@ func install(cmd *cobra.Command, args []string) error { return err } - file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, rw_r_r) + file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, mode644) if err != nil { return fmt.Errorf("creating helper plist file: %w", err) } @@ -138,7 +139,7 @@ func restrictRecursive(targetDir string, until string) error { if err = os.Chown(targetDir, 0, 0); err != nil { return fmt.Errorf("could not update ownership of helper path: %w", err) } - if err = os.Chmod(targetDir, rwx_rx_rx|fs.ModeSticky); err != nil { + if err = os.Chmod(targetDir, mode755|fs.ModeSticky); err != nil { return fmt.Errorf("could not update permissions of helper path: %w", err) } targetDir = filepath.Dir(targetDir) @@ -205,7 +206,7 @@ func installExecutable(user string) (string, error) { } targetDir := filepath.Join(installPrefix, "podman", "helper", user) - if err := os.MkdirAll(targetDir, rwx_rx_rx); err != nil { + if err := os.MkdirAll(targetDir, mode755); err != nil { return "", fmt.Errorf("could not create helper directory structure: %w", err) } @@ -220,7 +221,7 @@ func installExecutable(user string) (string, error) { } install := filepath.Join(targetDir, filepath.Base(exec)) - return install, copyFile(install, exec, rwx_rx_rx) + return install, copyFile(install, exec, mode755) } func copyFile(dest string, source string, perms fs.FileMode) error { diff --git a/cmd/podman-mac-helper/main.go b/cmd/podman-mac-helper/main.go index 55aaa0e7c0..41b960e45a 100644 --- a/cmd/podman-mac-helper/main.go +++ b/cmd/podman-mac-helper/main.go @@ -72,7 +72,7 @@ func getUserInfo(name string) (string, string, string, error) { entry := readCapped(output) elements := strings.Split(entry, ":") if len(elements) < 9 || elements[0] != name { - return "", "", "", errors.New("Could not look up user") + return "", "", "", errors.New("could not look up user") } return elements[0], elements[2], elements[8], nil diff --git a/cmd/podman-mac-helper/uninstall.go b/cmd/podman-mac-helper/uninstall.go index 3896394cec..460516d56c 100644 --- a/cmd/podman-mac-helper/uninstall.go +++ b/cmd/podman-mac-helper/uninstall.go @@ -3,13 +3,14 @@ package main import ( + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" - "io/fs" - "errors" + "github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" ) @@ -58,7 +59,7 @@ func uninstall(cmd *cobra.Command, args []string) error { } // Get the file information of dockerSock - if _, err := os.Lstat(dockerSock); err != nil { + if err := fileutils.Lexists(dockerSock); err != nil { // If the error 
is due to the file not existing, return nil if errors.Is(err, fs.ErrNotExist) { return nil @@ -67,7 +68,7 @@ func uninstall(cmd *cobra.Command, args []string) error { return fmt.Errorf("could not stat dockerSock: %v", err) } if target, err := os.Readlink(dockerSock); err != nil { - //Return an error if unable to read the symlink + // Return an error if unable to read the symlink return fmt.Errorf("could not read dockerSock symlink: %v", err) } else { // Check if the target of the symlink matches the expected target diff --git a/cmd/podman-testing/create.go b/cmd/podman-testing/create.go new file mode 100644 index 0000000000..1bdd14c5f9 --- /dev/null +++ b/cmd/podman-testing/create.go @@ -0,0 +1,127 @@ +package main + +import ( + "fmt" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v5/cmd/podman/validate" + "github.com/containers/podman/v5/internal/domain/entities" + "github.com/spf13/cobra" +) + +var ( + createStorageLayerDescription = `Create an unmanaged layer in local storage.` + createStorageLayerCmd = &cobra.Command{ + Use: "create-storage-layer [options]", + Args: validate.NoArgs, + Short: "Create an unmanaged layer", + Long: createStorageLayerDescription, + RunE: createStorageLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-storage-layer`, + } + + createStorageLayerOpts entities.CreateStorageLayerOptions + + createLayerDescription = `Create an unused layer in local storage.` + createLayerCmd = &cobra.Command{ + Use: "create-layer [options]", + Args: validate.NoArgs, + Short: "Create an unused layer", + Long: createLayerDescription, + RunE: createLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-layer`, + } + + createLayerOpts entities.CreateLayerOptions + + createImageDescription = `Create an image in local storage.` + createImageCmd = &cobra.Command{ + Use: "create-image [options]", + Args: validate.NoArgs, + Short: "Create an image", + Long: createImageDescription, + RunE: createImage, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-image`, + } + + createImageOpts entities.CreateImageOptions + + createContainerDescription = `Create a container in local storage.` + createContainerCmd = &cobra.Command{ + Use: "create-container [options]", + Args: validate.NoArgs, + Short: "Create a container", + Long: createContainerDescription, + RunE: createContainer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-container`, + } + + createContainerOpts entities.CreateContainerOptions +) + +func init() { + mainCmd.AddCommand(createStorageLayerCmd) + flags := createStorageLayerCmd.Flags() + flags.StringVarP(&createStorageLayerOpts.ID, "id", "i", "", "ID to assign the new layer (default random)") + flags.StringVarP(&createStorageLayerOpts.Parent, "parent", "p", "", "ID of parent of new layer (default none)") + + mainCmd.AddCommand(createLayerCmd) + flags = createLayerCmd.Flags() + flags.StringVarP(&createLayerOpts.ID, "id", "i", "", "ID to assign the new layer (default random)") + flags.StringVarP(&createLayerOpts.Parent, "parent", "p", "", "ID of parent of new layer (default none)") + + mainCmd.AddCommand(createImageCmd) + flags = createImageCmd.Flags() + flags.StringVarP(&createImageOpts.ID, "id", "i", "", "ID to assign the new image (default random)") + flags.StringVarP(&createImageOpts.Layer, "layer", "l", "", "ID of image's main layer (default none)") + + mainCmd.AddCommand(createContainerCmd) + flags = 
createContainerCmd.Flags() + flags.StringVarP(&createContainerOpts.ID, "id", "i", "", "ID to assign the new container (default random)") + flags.StringVarP(&createContainerOpts.Image, "image", "b", "", "ID of containers's base image (default none)") + flags.StringVarP(&createContainerOpts.Layer, "layer", "l", "", "ID of containers's read-write layer (default none)") +} + +func createStorageLayer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.CreateStorageLayer(mainContext, createStorageLayerOpts) + if err != nil { + return err + } + + fmt.Println(results.ID) + return nil +} + +func createLayer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.CreateLayer(mainContext, createLayerOpts) + if err != nil { + return err + } + + fmt.Println(results.ID) + return nil +} + +func createImage(cmd *cobra.Command, args []string) error { + results, err := testingEngine.CreateImage(mainContext, createImageOpts) + if err != nil { + return err + } + + fmt.Println(results.ID) + return nil +} + +func createContainer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.CreateContainer(mainContext, createContainerOpts) + if err != nil { + return err + } + + fmt.Println(results.ID) + return nil +} diff --git a/cmd/podman-testing/data.go b/cmd/podman-testing/data.go new file mode 100644 index 0000000000..6fe2099c00 --- /dev/null +++ b/cmd/podman-testing/data.go @@ -0,0 +1,405 @@ +package main + +import ( + "errors" + "os" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v5/cmd/podman/validate" + "github.com/containers/podman/v5/internal/domain/entities" + "github.com/spf13/cobra" +) + +var ( + createLayerDataDescription = `Create data for a layer in local storage.` + createLayerDataCmd = &cobra.Command{ + Use: "create-layer-data [options]", + Args: validate.NoArgs, + Short: "Create data for a layer", + Long: createLayerDataDescription, + RunE: createLayerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-layer-data`, + } + + createLayerDataOpts entities.CreateLayerDataOptions + createLayerDataKey string + createLayerDataValue string + createLayerDataFile string + + createImageDataDescription = `Create data for an image in local storage.` + createImageDataCmd = &cobra.Command{ + Use: "create-image-data [options]", + Args: validate.NoArgs, + Short: "Create data for an image", + Long: createImageDataDescription, + RunE: createImageData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-image-data`, + } + + createImageDataOpts entities.CreateImageDataOptions + createImageDataKey string + createImageDataValue string + createImageDataFile string + + createContainerDataDescription = `Create data for a container in local storage.` + createContainerDataCmd = &cobra.Command{ + Use: "create-container-data [options]", + Args: validate.NoArgs, + Short: "Create data for a container", + Long: createContainerDataDescription, + RunE: createContainerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing create-container-data`, + } + + createContainerDataOpts entities.CreateContainerDataOptions + createContainerDataKey string + createContainerDataValue string + createContainerDataFile string + + modifyLayerDataDescription = `Modify data for a layer in local storage, corrupting it.` + modifyLayerDataCmd = &cobra.Command{ + Use: "modify-layer-data [options]", + Args: validate.NoArgs, + Short: "Modify data for a layer", + Long: 
modifyLayerDataDescription, + RunE: modifyLayerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing modify-layer-data`, + } + + modifyLayerDataOpts entities.ModifyLayerDataOptions + modifyLayerDataValue string + modifyLayerDataFile string + + modifyImageDataDescription = `Modify data for an image in local storage, corrupting it.` + modifyImageDataCmd = &cobra.Command{ + Use: "modify-image-data [options]", + Args: validate.NoArgs, + Short: "Modify data for an image", + Long: modifyImageDataDescription, + RunE: modifyImageData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing modify-image-data`, + } + + modifyImageDataOpts entities.ModifyImageDataOptions + modifyImageDataValue string + modifyImageDataFile string + + modifyContainerDataDescription = `Modify data for a container in local storage, corrupting it.` + modifyContainerDataCmd = &cobra.Command{ + Use: "modify-container-data [options]", + Args: validate.NoArgs, + Short: "Modify data for a container", + Long: modifyContainerDataDescription, + RunE: modifyContainerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing modify-container-data`, + } + + modifyContainerDataOpts entities.ModifyContainerDataOptions + modifyContainerDataValue string + modifyContainerDataFile string + + removeLayerDataDescription = `Remove data from a layer in local storage, corrupting it.` + removeLayerDataCmd = &cobra.Command{ + Use: "remove-layer-data [options]", + Args: validate.NoArgs, + Short: "Remove data for a layer", + Long: removeLayerDataDescription, + RunE: removeLayerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-layer-data`, + } + + removeLayerDataOpts entities.RemoveLayerDataOptions + + removeImageDataDescription = `Remove data from an image in local storage, corrupting it.` + removeImageDataCmd = &cobra.Command{ + Use: "remove-image-data [options]", + Args: validate.NoArgs, + Short: "Remove data from an image", + Long: removeImageDataDescription, + RunE: removeImageData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-image-data`, + } + + removeImageDataOpts entities.RemoveImageDataOptions + + removeContainerDataDescription = `Remove data from a container in local storage, corrupting it.` + removeContainerDataCmd = &cobra.Command{ + Use: "remove-container-data [options]", + Args: validate.NoArgs, + Short: "Remove data from a container", + Long: removeContainerDataDescription, + RunE: removeContainerData, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-container-data`, + } + + removeContainerDataOpts entities.RemoveContainerDataOptions +) + +func init() { + mainCmd.AddCommand(createLayerDataCmd) + flags := createLayerDataCmd.Flags() + flags.StringVarP(&createLayerDataOpts.ID, "layer", "i", "", "ID of the layer") + flags.StringVarP(&createLayerDataKey, "key", "k", "", "Name of the data item") + flags.StringVarP(&createLayerDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&createLayerDataFile, "file", "f", "", "File containing the data item") + + mainCmd.AddCommand(createImageDataCmd) + flags = createImageDataCmd.Flags() + flags.StringVarP(&createImageDataOpts.ID, "image", "i", "", "ID of the image") + flags.StringVarP(&createImageDataKey, "key", "k", "", "Name of the data item") + flags.StringVarP(&createImageDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&createImageDataFile, "file", "f", "", "File 
containing the data item") + + mainCmd.AddCommand(createContainerDataCmd) + flags = createContainerDataCmd.Flags() + flags.StringVarP(&createContainerDataOpts.ID, "container", "i", "", "ID of the container") + flags.StringVarP(&createContainerDataKey, "key", "k", "", "Name of the data item") + flags.StringVarP(&createContainerDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&createContainerDataFile, "file", "f", "", "File containing the data item") + + mainCmd.AddCommand(modifyLayerDataCmd) + flags = modifyLayerDataCmd.Flags() + flags.StringVarP(&modifyLayerDataOpts.ID, "layer", "i", "", "ID of the layer") + flags.StringVarP(&modifyLayerDataOpts.Key, "key", "k", "", "Name of the data item") + flags.StringVarP(&modifyLayerDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&modifyLayerDataFile, "file", "f", "", "File containing the data item") + + mainCmd.AddCommand(modifyImageDataCmd) + flags = modifyImageDataCmd.Flags() + flags.StringVarP(&modifyImageDataOpts.ID, "image", "i", "", "ID of the image") + flags.StringVarP(&modifyImageDataOpts.Key, "key", "k", "", "Name of the data item") + flags.StringVarP(&modifyImageDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&modifyImageDataFile, "file", "f", "", "File containing the data item") + + mainCmd.AddCommand(modifyContainerDataCmd) + flags = modifyContainerDataCmd.Flags() + flags.StringVarP(&modifyContainerDataOpts.ID, "container", "i", "", "ID of the container") + flags.StringVarP(&modifyContainerDataOpts.Key, "key", "k", "", "Name of the data item") + flags.StringVarP(&modifyContainerDataValue, "value", "v", "", "Value of the data item") + flags.StringVarP(&modifyContainerDataFile, "file", "f", "", "File containing the data item") + + mainCmd.AddCommand(removeLayerDataCmd) + flags = removeLayerDataCmd.Flags() + flags.StringVarP(&removeLayerDataOpts.ID, "layer", "i", "", "ID of the layer") + flags.StringVarP(&removeLayerDataOpts.Key, "key", "k", "", "Name of the data item") + + mainCmd.AddCommand(removeImageDataCmd) + flags = removeImageDataCmd.Flags() + flags.StringVarP(&removeImageDataOpts.ID, "image", "i", "", "ID of the image") + flags.StringVarP(&removeImageDataOpts.Key, "key", "k", "", "Name of the data item") + + mainCmd.AddCommand(removeContainerDataCmd) + flags = removeContainerDataCmd.Flags() + flags.StringVarP(&removeContainerDataOpts.ID, "container", "i", "", "ID of the container") + flags.StringVarP(&removeContainerDataOpts.Key, "key", "k", "", "Name of the data item") +} + +func createLayerData(cmd *cobra.Command, args []string) error { + if createLayerDataOpts.ID == "" { + return errors.New("layer ID not specified") + } + if createLayerDataKey == "" { + return errors.New("layer data name not specified") + } + if createLayerDataValue == "" && createLayerDataFile == "" { + return errors.New("neither layer data value nor file specified") + } + createLayerDataOpts.Data = make(map[string][]byte) + if createLayerDataValue != "" { + createLayerDataOpts.Data[createLayerDataKey] = []byte(createLayerDataValue) + } + if createLayerDataFile != "" { + buf, err := os.ReadFile(createLayerDataFile) + if err != nil { + return err + } + createLayerDataOpts.Data[createLayerDataKey] = buf + } + _, err := testingEngine.CreateLayerData(mainContext, createLayerDataOpts) + if err != nil { + return err + } + return nil +} + +func createImageData(cmd *cobra.Command, args []string) error { + if createImageDataOpts.ID == "" { + return errors.New("image ID not specified") + } + if 
createImageDataKey == "" { + return errors.New("image data name not specified") + } + if createImageDataValue == "" && createImageDataFile == "" { + return errors.New("neither image data value nor file specified") + } + createImageDataOpts.Data = make(map[string][]byte) + if createImageDataValue != "" { + createImageDataOpts.Data[createImageDataKey] = []byte(createImageDataValue) + } + if createImageDataFile != "" { + d, err := os.ReadFile(createImageDataFile) + if err != nil { + return err + } + createImageDataOpts.Data[createImageDataKey] = d + } + _, err := testingEngine.CreateImageData(mainContext, createImageDataOpts) + if err != nil { + return err + } + return nil +} + +func createContainerData(cmd *cobra.Command, args []string) error { + if createContainerDataOpts.ID == "" { + return errors.New("container ID not specified") + } + if createContainerDataKey == "" { + return errors.New("container data name not specified") + } + if createContainerDataValue == "" && createContainerDataFile == "" { + return errors.New("neither container data value nor file specified") + } + createContainerDataOpts.Data = make(map[string][]byte) + if createContainerDataValue != "" { + createContainerDataOpts.Data[createContainerDataKey] = []byte(createContainerDataValue) + } + if createContainerDataFile != "" { + d, err := os.ReadFile(createContainerDataFile) + if err != nil { + return err + } + createContainerDataOpts.Data[createContainerDataKey] = d + } + _, err := testingEngine.CreateContainerData(mainContext, createContainerDataOpts) + if err != nil { + return err + } + return nil +} + +func modifyLayerData(cmd *cobra.Command, args []string) error { + if modifyLayerDataOpts.ID == "" { + return errors.New("layer ID not specified") + } + if modifyLayerDataOpts.Key == "" { + return errors.New("layer data name not specified") + } + if modifyLayerDataValue == "" && modifyLayerDataFile == "" { + return errors.New("neither layer data value nor file specified") + } + modifyLayerDataOpts.Data = []byte(modifyLayerDataValue) + if modifyLayerDataFile != "" { + d, err := os.ReadFile(modifyLayerDataFile) + if err != nil { + return err + } + modifyLayerDataOpts.Data = d + } + _, err := testingEngine.ModifyLayerData(mainContext, modifyLayerDataOpts) + if err != nil { + return err + } + return nil +} + +func modifyImageData(cmd *cobra.Command, args []string) error { + if modifyImageDataOpts.ID == "" { + return errors.New("image ID not specified") + } + if modifyImageDataOpts.Key == "" { + return errors.New("image data name not specified") + } + if modifyImageDataValue == "" && modifyImageDataFile == "" { + return errors.New("neither image data value nor file specified") + } + modifyImageDataOpts.Data = []byte(modifyImageDataValue) + if modifyImageDataFile != "" { + d, err := os.ReadFile(modifyImageDataFile) + if err != nil { + return err + } + modifyImageDataOpts.Data = d + } + _, err := testingEngine.ModifyImageData(mainContext, modifyImageDataOpts) + if err != nil { + return err + } + return nil +} + +func modifyContainerData(cmd *cobra.Command, args []string) error { + if modifyContainerDataOpts.ID == "" { + return errors.New("container ID not specified") + } + if modifyContainerDataOpts.Key == "" { + return errors.New("container data name not specified") + } + if modifyContainerDataValue == "" && modifyContainerDataFile == "" { + return errors.New("neither container data value nor file specified") + } + modifyContainerDataOpts.Data = []byte(modifyContainerDataValue) + if modifyContainerDataFile != "" { + d, err := 
os.ReadFile(modifyContainerDataFile) + if err != nil { + return err + } + modifyContainerDataOpts.Data = d + } + _, err := testingEngine.ModifyContainerData(mainContext, modifyContainerDataOpts) + if err != nil { + return err + } + return nil +} + +func removeLayerData(cmd *cobra.Command, args []string) error { + if removeLayerDataOpts.ID == "" { + return errors.New("layer ID not specified") + } + if removeLayerDataOpts.Key == "" { + return errors.New("layer data name not specified") + } + _, err := testingEngine.RemoveLayerData(mainContext, removeLayerDataOpts) + if err != nil { + return err + } + return nil +} + +func removeImageData(cmd *cobra.Command, args []string) error { + if removeImageDataOpts.ID == "" { + return errors.New("image ID not specified") + } + if removeImageDataOpts.Key == "" { + return errors.New("image data name not specified") + } + _, err := testingEngine.RemoveImageData(mainContext, removeImageDataOpts) + if err != nil { + return err + } + return nil +} + +func removeContainerData(cmd *cobra.Command, args []string) error { + if removeContainerDataOpts.ID == "" { + return errors.New("container ID not specified") + } + if removeContainerDataOpts.Key == "" { + return errors.New("container data name not specified") + } + _, err := testingEngine.RemoveContainerData(mainContext, removeContainerDataOpts) + if err != nil { + return err + } + return nil +} diff --git a/cmd/podman-testing/layer.go b/cmd/podman-testing/layer.go new file mode 100644 index 0000000000..ae4de28ca1 --- /dev/null +++ b/cmd/podman-testing/layer.go @@ -0,0 +1,91 @@ +package main + +import ( + "errors" + "os" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v5/cmd/podman/validate" + "github.com/containers/podman/v5/internal/domain/entities" + "github.com/spf13/cobra" +) + +var ( + populateLayerDescription = `Populate a layer in local storage.` + populateLayerCmd = &cobra.Command{ + Use: "populate-layer [options]", + Args: validate.NoArgs, + Short: "Populate a layer", + Long: populateLayerDescription, + RunE: populateLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing populate-layer`, + } + + populateLayerOpts entities.PopulateLayerOptions + populateLayerFile string + + modifyLayerDescription = `Modify a layer in local storage, corrupting it.` + modifyLayerCmd = &cobra.Command{ + Use: "modify-layer [options]", + Args: validate.NoArgs, + Short: "Modify the contents of a layer", + Long: modifyLayerDescription, + RunE: modifyLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing modify-layer`, + } + + modifyLayerOpts entities.ModifyLayerOptions + modifyLayerFile string +) + +func init() { + mainCmd.AddCommand(populateLayerCmd) + flags := populateLayerCmd.Flags() + flags.StringVarP(&populateLayerOpts.ID, "layer", "l", "", "ID of layer to be populated") + flags.StringVarP(&populateLayerFile, "file", "f", "", "archive of contents to extract in layer") + + mainCmd.AddCommand(modifyLayerCmd) + flags = modifyLayerCmd.Flags() + flags.StringVarP(&modifyLayerOpts.ID, "layer", "l", "", "ID of layer to be modified") + flags.StringVarP(&modifyLayerFile, "file", "f", "", "archive of contents to extract over layer") +} + +func populateLayer(cmd *cobra.Command, args []string) error { + if populateLayerOpts.ID == "" { + return errors.New("layer ID not specified") + } + if populateLayerFile == "" { + return errors.New("layer contents file not specified") + } + buf, err := os.ReadFile(populateLayerFile) + if err != nil { + return 
err + } + populateLayerOpts.ContentsArchive = buf + _, err = testingEngine.PopulateLayer(mainContext, populateLayerOpts) + if err != nil { + return err + } + return nil +} + +func modifyLayer(cmd *cobra.Command, args []string) error { + if modifyLayerOpts.ID == "" { + return errors.New("layer ID not specified") + } + if modifyLayerFile == "" { + return errors.New("layer contents file not specified") + } + buf, err := os.ReadFile(modifyLayerFile) + if err != nil { + return err + } + modifyLayerOpts.ContentsArchive = buf + _, err = testingEngine.ModifyLayer(mainContext, modifyLayerOpts) + if err != nil { + return err + } + return nil +} diff --git a/cmd/podman-testing/main.go b/cmd/podman-testing/main.go new file mode 100644 index 0000000000..ccc1ff7454 --- /dev/null +++ b/cmd/podman-testing/main.go @@ -0,0 +1,128 @@ +package main + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "syscall" + + "github.com/containers/common/pkg/config" + _ "github.com/containers/podman/v5/cmd/podman/completion" + ientities "github.com/containers/podman/v5/internal/domain/entities" + "github.com/containers/podman/v5/internal/domain/infra" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/storage" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var ( + mainCmd = &cobra.Command{ + Use: "podman-testing", + Long: "Assorted tools for use in testing podman", + RunE: func(cmd *cobra.Command, args []string) error { + return cmd.Help() + }, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return before() + }, + PersistentPostRunE: func(cmd *cobra.Command, args []string) error { + return after() + }, + SilenceUsage: true, + SilenceErrors: true, + } + mainContext = context.Background() + podmanConfig entities.PodmanConfig + globalStorageOptions storage.StoreOptions + globalLogLevel string + testingEngine ientities.TestingEngine +) + +func init() { + podmanConfig.FlagSet = mainCmd.PersistentFlags() + fl := mainCmd.PersistentFlags() + fl.StringVar(&podmanConfig.DockerConfig, "docker-config", os.Getenv("DOCKER_CONFIG"), "path to .docker/config") + fl.StringVar(&globalLogLevel, "log-level", "warn", "logging level") + fl.StringVar(&podmanConfig.URI, "url", "", "URL to access Podman service") + fl.StringVar(&podmanConfig.RegistriesConf, "registries-conf", os.Getenv("REGISTRIES_CONF"), "path to registries.conf (REGISTRIES_CONF)") +} + +func before() error { + if globalLogLevel != "" { + parsedLogLevel, err := logrus.ParseLevel(globalLogLevel) + if err != nil { + return fmt.Errorf("parsing log level %q: %w", globalLogLevel, err) + } + logrus.SetLevel(parsedLogLevel) + } + if err := storeBefore(); err != nil { + return fmt.Errorf("setting up storage: %w", err) + } + + podmanConfig.EngineMode = engineMode + podmanConfig.Remote = podmanConfig.URI != "" + + containersConf, err := config.Default() + if err != nil { + return fmt.Errorf("loading default configuration (may reference $CONTAINERS_CONF): %w", err) + } + podmanConfig.ContainersConfDefaultsRO = containersConf + containersConf, err = config.New(nil) + if err != nil { + return fmt.Errorf("loading default configuration (may reference $CONTAINERS_CONF): %w", err) + } + podmanConfig.ContainersConf = containersConf + + podmanConfig.StorageDriver = globalStorageOptions.GraphDriverName + podmanConfig.GraphRoot = globalStorageOptions.GraphRoot + podmanConfig.Runroot = globalStorageOptions.RunRoot + 
podmanConfig.ImageStore = globalStorageOptions.ImageStore + podmanConfig.StorageOpts = globalStorageOptions.GraphDriverOptions + podmanConfig.TransientStore = globalStorageOptions.TransientStore + + te, err := infra.NewTestingEngine(&podmanConfig) + if err != nil { + return fmt.Errorf("initializing libpod: %w", err) + } + testingEngine = te + return nil +} + +func after() error { + if err := storeAfter(); err != nil { + return fmt.Errorf("shutting down storage: %w", err) + } + return nil +} + +func main() { + if reexec.Init() { + // We were invoked with a different argv[0] indicating that we + // had a specific job to do as a subprocess, and it's done. + return + } + unshare.MaybeReexecUsingUserNamespace(false) + + exitCode := 1 + if err := mainCmd.Execute(); err != nil { + if logrus.IsLevelEnabled(logrus.TraceLevel) { + fmt.Fprintf(os.Stderr, "Error: %+v\n", err) + } else { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + } + var ee *exec.ExitError + if errors.As(err, &ee) { + if w, ok := ee.Sys().(syscall.WaitStatus); ok { + exitCode = w.ExitStatus() + } + } + } else { + exitCode = 0 + } + os.Exit(exitCode) +} diff --git a/cmd/podman-testing/remove.go b/cmd/podman-testing/remove.go new file mode 100644 index 0000000000..59dadfd2d0 --- /dev/null +++ b/cmd/podman-testing/remove.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v5/cmd/podman/validate" + "github.com/containers/podman/v5/internal/domain/entities" + "github.com/spf13/cobra" +) + +var ( + removeStorageLayerDescription = `Remove an unmanaged layer in local storage, potentially corrupting it.` + removeStorageLayerCmd = &cobra.Command{ + Use: "remove-storage-layer [options]", + Args: validate.NoArgs, + Short: "Remove an unmanaged layer", + Long: removeStorageLayerDescription, + RunE: removeStorageLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-storage-layer`, + } + + removeStorageLayerOpts entities.RemoveStorageLayerOptions + + removeLayerDescription = `Remove a layer in local storage, potentially corrupting it.` + removeLayerCmd = &cobra.Command{ + Use: "remove-layer [options]", + Args: validate.NoArgs, + Short: "Remove a layer", + Long: removeLayerDescription, + RunE: removeLayer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-layer`, + } + + removeLayerOpts entities.RemoveLayerOptions + + removeImageDescription = `Remove an image in local storage, potentially corrupting it.` + removeImageCmd = &cobra.Command{ + Use: "remove-image [options]", + Args: validate.NoArgs, + Short: "Remove an image", + Long: removeImageDescription, + RunE: removeImage, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-image`, + } + + removeImageOpts entities.RemoveImageOptions + + removeContainerDescription = `Remove a container in local storage, potentially corrupting it.` + removeContainerCmd = &cobra.Command{ + Use: "remove-container [options]", + Args: validate.NoArgs, + Short: "Remove an container", + Long: removeContainerDescription, + RunE: removeContainer, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman testing remove-container`, + } + + removeContainerOpts entities.RemoveContainerOptions +) + +func init() { + mainCmd.AddCommand(removeStorageLayerCmd) + flags := removeStorageLayerCmd.Flags() + flags.StringVarP(&removeStorageLayerOpts.ID, "layer", "i", "", "ID of the layer to remove") + + mainCmd.AddCommand(removeLayerCmd) + 
flags = removeLayerCmd.Flags() + flags.StringVarP(&removeLayerOpts.ID, "layer", "i", "", "ID of the layer to remove") + + mainCmd.AddCommand(removeImageCmd) + flags = removeImageCmd.Flags() + flags.StringVarP(&removeImageOpts.ID, "image", "i", "", "ID of the image to remove") + + mainCmd.AddCommand(removeContainerCmd) + flags = removeContainerCmd.Flags() + flags.StringVarP(&removeContainerOpts.ID, "container", "i", "", "ID of the container to remove") +} + +func removeStorageLayer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.RemoveStorageLayer(mainContext, removeStorageLayerOpts) + if err != nil { + return err + } + fmt.Println(results.ID) + return nil +} + +func removeLayer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.RemoveLayer(mainContext, removeLayerOpts) + if err != nil { + return err + } + fmt.Println(results.ID) + return nil +} + +func removeImage(cmd *cobra.Command, args []string) error { + results, err := testingEngine.RemoveImage(mainContext, removeImageOpts) + if err != nil { + return err + } + fmt.Println(results.ID) + return nil +} + +func removeContainer(cmd *cobra.Command, args []string) error { + results, err := testingEngine.RemoveContainer(mainContext, removeContainerOpts) + if err != nil { + return err + } + fmt.Println(results.ID) + return nil +} diff --git a/cmd/podman-testing/store_supported.go b/cmd/podman-testing/store_supported.go new file mode 100644 index 0000000000..b8e2fac5a7 --- /dev/null +++ b/cmd/podman-testing/store_supported.go @@ -0,0 +1,65 @@ +//go:build linux && !remote +// +build linux,!remote + +package main + +import ( + "fmt" + "os" + + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/storage" + "github.com/containers/storage/types" +) + +var ( + globalStore storage.Store + engineMode = entities.ABIMode +) + +func init() { + if defaultStoreOptions, err := storage.DefaultStoreOptions(); err == nil { + globalStorageOptions = defaultStoreOptions + } + if storageConf, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { + options := globalStorageOptions + if types.ReloadConfigurationFileIfNeeded(storageConf, &options) == nil { + globalStorageOptions = options + } + } + fl := mainCmd.PersistentFlags() + fl.StringVar(&globalStorageOptions.GraphDriverName, "storage-driver", "", "storage driver used to manage images and containers") + fl.StringVar(&globalStorageOptions.GraphRoot, "root", "", "where images and containers will be stored") + fl.StringVar(&globalStorageOptions.RunRoot, "runroot", "", "where volatile state information will be stored") + fl.StringArrayVar(&globalStorageOptions.GraphDriverOptions, "storage-opt", nil, "storage driver options") + fl.StringVar(&globalStorageOptions.ImageStore, "imagestore", "", "where to store just some parts of images") + fl.BoolVar(&globalStorageOptions.TransientStore, "transient-store", false, "enable transient container storage") +} + +func storeBefore() error { + defaultStoreOptions, err := storage.DefaultStoreOptions() + if err != nil { + fmt.Fprintf(os.Stderr, "selecting storage options: %v", err) + return nil + } + globalStorageOptions = defaultStoreOptions + store, err := storage.GetStore(globalStorageOptions) + if err != nil { + return err + } + globalStore = store + if podmanConfig.URI != "" { + engineMode = entities.TunnelMode + } else { + engineMode = entities.ABIMode + } + return nil +} + +func storeAfter() error { + if globalStore != nil { + _, err := globalStore.Shutdown(false) + return err + } + return nil +} 
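For orientation, a hedged sketch of how the new `podman-testing` helper might be exercised once built (command and flag names are taken from the Cobra definitions above; `<layer-id>` is a placeholder, and the storage-backed commands only work on Linux builds):

```
# Create an unused layer; the command prints the new layer's ID
podman-testing create-layer

# Attach a data item to that layer, then corrupt and remove it to exercise error paths
podman-testing create-layer-data --layer <layer-id> --key my-key --value my-value
podman-testing modify-layer-data --layer <layer-id> --key my-key --value corrupted
podman-testing remove-layer-data --layer <layer-id> --key my-key
```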
diff --git a/cmd/podman-testing/store_unsupported.go b/cmd/podman-testing/store_unsupported.go new file mode 100644 index 0000000000..de79ed88b0 --- /dev/null +++ b/cmd/podman-testing/store_unsupported.go @@ -0,0 +1,16 @@ +//go:build !linux || remote +// +build !linux remote + +package main + +import "github.com/containers/podman/v5/pkg/domain/entities" + +const engineMode = entities.TunnelMode + +func storeBefore() error { + return nil +} + +func storeAfter() error { + return nil +} diff --git a/cmd/podman-wslkerninst/main.go b/cmd/podman-wslkerninst/main.go index a12306ba81..6f34b2bbf0 100644 --- a/cmd/podman-wslkerninst/main.go +++ b/cmd/podman-wslkerninst/main.go @@ -16,9 +16,12 @@ import ( ) const ( + //nolint:stylecheck MB_ICONWARNING = 0x00000030 - MB_OK = 0x00000000 - MB_DEFBUTTON1 = 0x00000000 + //nolint:stylecheck + MB_OK = 0x00000000 + //nolint:stylecheck + MB_DEFBUTTON1 = 0x00000000 ) const KernelWarning = "WSL Kernel installation did not complete successfully. " + @@ -85,7 +88,7 @@ func warn(title string, caption string) int { func main() { args := os.Args - setupLogging(path.Base(args[0])) + _, _ = setupLogging(path.Base(args[0])) if wutil.IsWSLInstalled() { // nothing to do logrus.Info("WSL Kernel already installed") diff --git a/cmd/podman/common/build.go b/cmd/podman/common/build.go index 3411f43ec7..ceb8e66494 100644 --- a/cmd/podman/common/build.go +++ b/cmd/podman/common/build.go @@ -75,7 +75,7 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBu if err := flag.Value.Set("missing"); err != nil { logrus.Errorf("Unable to set --pull to 'missing': %v", err) } - flag.Usage = `Pull image policy ("always/true"|"missing"|"never/false"|"newer")` + flag.Usage = `Pull image policy ("always"|"missing"|"never"|"newer")` flags.AddFlagSet(&budFlags) // Add the completion functions @@ -113,14 +113,20 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBu completion.CompleteCommandFlags(cmd, fromAndBudFlagsCompletions) flags.SetNormalizeFunc(buildahCLI.AliasFlags) if registry.IsRemote() { + // Unset the isolation default as we never want to send this over the API + // as it can be wrong (root vs rootless). + _ = flags.Lookup("isolation").Value.Set("") _ = flags.MarkHidden("disable-content-trust") _ = flags.MarkHidden("sign-by") _ = flags.MarkHidden("signature-policy") - _ = flags.MarkHidden("tls-verify") _ = flags.MarkHidden("compress") _ = flags.MarkHidden("output") _ = flags.MarkHidden("logsplit") _ = flags.MarkHidden("cw") + // Support for farm build in podman-remote + if !isFarmBuild { + _ = flags.MarkHidden("tls-verify") + } } if isFarmBuild { for _, f := range FarmBuildHiddenFlags { @@ -130,9 +136,8 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBu } func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrapper) (*entities.BuildOptions, error) { - if (cmd.Flags().Changed("squash") && cmd.Flags().Changed("layers")) || - (cmd.Flags().Changed("squash-all") && cmd.Flags().Changed("squash")) { - return nil, errors.New("cannot specify --squash with --layers and --squash-all with --squash") + if cmd.Flags().Changed("squash-all") && cmd.Flags().Changed("squash") { + return nil, errors.New("cannot specify --squash-all with --squash") } if cmd.Flag("output").Changed && registry.IsRemote() { @@ -197,10 +202,7 @@ func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrap // No context directory or URL was specified. 
Try to use the home of // the first locally-available Containerfile. for i := range containerFiles { - if strings.HasPrefix(containerFiles[i], "http://") || - strings.HasPrefix(containerFiles[i], "https://") || - strings.HasPrefix(containerFiles[i], "git://") || - strings.HasPrefix(containerFiles[i], "github.com/") { + if isURL(containerFiles[i]) { continue } absFile, err := filepath.Abs(containerFiles[i]) @@ -236,6 +238,10 @@ func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrap } } + if err := areContainerfilesValid(contextDir, containerFiles); err != nil { + return nil, err + } + var logFile *os.File if cmd.Flag("logfile").Changed { var err error @@ -310,7 +316,9 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *Buil pullPolicy = buildahDefine.PullAlways } - if flags.PullNever || strings.EqualFold(strings.TrimSpace(flags.Pull), "never") { + if flags.PullNever || + strings.EqualFold(strings.TrimSpace(flags.Pull), "false") || + strings.EqualFold(strings.TrimSpace(flags.Pull), "never") { pullPolicy = buildahDefine.PullNever } @@ -398,9 +406,14 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *Buil compression = buildahDefine.Uncompressed } - isolation, err := parse.IsolationOption(flags.Isolation) - if err != nil { - return nil, err + isolation := buildahDefine.IsolationDefault + // Only parse the isolation when it is actually needed as we do not want to send a wrong default + // to the server in the remote case (root vs rootless). + if flags.Isolation != "" { + isolation, err = parse.IsolationOption(flags.Isolation) + if err != nil { + return nil, err + } } usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation) @@ -616,3 +629,41 @@ func parseDockerignore(ignoreFile string) ([]string, error) { } return excludes, nil } + +func areContainerfilesValid(contextDir string, containerFiles []string) error { + for _, f := range containerFiles { + if isURL(f) || f == "/dev/stdin" { + continue + } + + // Because currently podman runs the test/bud.bats tests under the buildah project in CI, + // the following error messages need to be consistent with buildah; otherwise, the podman CI will fail. 
+ // See: https://github.com/containers/buildah/blob/4c781b59b49d66e07324566555339888113eb7e2/imagebuildah/build.go#L139-L141 + // https://github.com/containers/buildah/blob/4c781b59b49d66e07324566555339888113eb7e2/tests/bud.bats#L3474-L3479 + if utils.IsDir(f) { + return fmt.Errorf("containerfile: %q cannot be path to a directory", f) + } + + // If the file is not found, try again with context directory prepended (if not prepended yet) + // Ref: https://github.com/containers/buildah/blob/4c781b59b49d66e07324566555339888113eb7e2/imagebuildah/build.go#L125-L135 + if utils.FileExists(f) { + continue + } + if !strings.HasPrefix(f, contextDir) { + if utils.FileExists(filepath.Join(contextDir, f)) { + continue + } + } + + return fmt.Errorf("the specified Containerfile or Dockerfile does not exist, %s: %w", f, syscall.ENOENT) + } + + return nil +} + +func isURL(s string) bool { + return strings.HasPrefix(s, "http://") || + strings.HasPrefix(s, "https://") || + strings.HasPrefix(s, "git://") || + strings.HasPrefix(s, "github.com/") +} diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go index a8e9fc0102..10d433cf33 100644 --- a/cmd/podman/common/completion.go +++ b/cmd/podman/common/completion.go @@ -58,8 +58,13 @@ func setupContainerEngine(cmd *cobra.Command) (entities.ContainerEngine, error) } if !registry.IsRemote() { _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess] + cgroupMode := "" - err := containerEngine.SetupRootless(registry.Context(), noMoveProcess) + if flag := cmd.LocalFlags().Lookup("cgroups"); flag != nil { + cgroupMode = flag.Value.String() + } + + err := containerEngine.SetupRootless(registry.Context(), noMoveProcess, cgroupMode) if err != nil { return nil, err } @@ -980,12 +985,12 @@ func AutocompleteImageVolume(cmd *cobra.Command, args []string, toComplete strin } // AutocompleteLogDriver - Autocomplete log-driver options. 
-// -> "journald", "none", "k8s-file", "passthrough" +// -> "journald", "none", "k8s-file", "passthrough", "passthrough-tty" func AutocompleteLogDriver(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { // don't show json-file logDrivers := []string{define.JournaldLogging, define.NoLogging, define.KubernetesLogging} if !registry.IsRemote() { - logDrivers = append(logDrivers, define.PassthroughLogging) + logDrivers = append(logDrivers, define.PassthroughLogging, define.PassthroughTTYLogging) } return logDrivers, cobra.ShellCompDirectiveNoFileComp } @@ -1298,7 +1303,7 @@ func getEntityType(cmd *cobra.Command, args []string, o interface{}) interface{} } // network logic if networks, _ := getNetworks(cmd, args[0], completeDefault); len(networks) > 0 { - return &types.Network{} + return &entities.NetworkInspectReport{} } return o } @@ -1420,10 +1425,10 @@ func AutocompleteEventFilter(cmd *cobra.Command, args []string, toComplete strin events.Exited.String(), events.Export.String(), events.Import.String(), events.Init.String(), events.Kill.String(), events.LoadFromArchive.String(), events.Mount.String(), events.NetworkConnect.String(), events.NetworkDisconnect.String(), events.Pause.String(), events.Prune.String(), events.Pull.String(), - events.Push.String(), events.Refresh.String(), events.Remove.String(), events.Rename.String(), - events.Renumber.String(), events.Restart.String(), events.Restore.String(), events.Save.String(), - events.Start.String(), events.Stop.String(), events.Sync.String(), events.Tag.String(), events.Unmount.String(), - events.Unpause.String(), events.Untag.String(), + events.PullError.String(), events.Push.String(), events.Refresh.String(), events.Remove.String(), + events.Rename.String(), events.Renumber.String(), events.Restart.String(), events.Restore.String(), + events.Save.String(), events.Start.String(), events.Stop.String(), events.Sync.String(), events.Tag.String(), + events.Unmount.String(), events.Unpause.String(), events.Untag.String(), events.Update.String(), }, cobra.ShellCompDirectiveNoFileComp } eventTypes := func(_ string) ([]string, cobra.ShellCompDirective) { diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go index 3a1c7e5a6d..6de40987ee 100644 --- a/cmd/podman/common/create.go +++ b/cmd/podman/common/create.go @@ -1,8 +1,6 @@ package common import ( - "os" - "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/completion" commonFlag "github.com/containers/common/pkg/flag" @@ -396,6 +394,13 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, ) _ = cmd.RegisterFlagCompletionFunc(requiresFlagName, AutocompleteContainers) + retryFlagName := "retry" + createFlags.Uint(retryFlagName, registry.RetryDefault(), "number of times to retry in case of failure when performing pull") + _ = cmd.RegisterFlagCompletionFunc(retryFlagName, completion.AutocompleteNone) + retryDelayFlagName := "retry-delay" + createFlags.String(retryDelayFlagName, registry.RetryDelayDefault(), "delay between retries in case of pull failures") + _ = cmd.RegisterFlagCompletionFunc(retryDelayFlagName, completion.AutocompleteNone) + createFlags.BoolVar( &cf.Rm, "rm", false, @@ -411,7 +416,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, createFlags.StringVar( &cf.SdNotifyMode, sdnotifyFlagName, cf.SdNotifyMode, - `control sd-notify behavior ("container"|"conmon"|"ignore")`, + `control sd-notify behavior ("container"|"conmon"|"healthy"|"ignore")`, 
) _ = cmd.RegisterFlagCompletionFunc(sdnotifyFlagName, AutocompleteSDNotify) @@ -636,7 +641,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, `If a container with the same name exists, replace it`, ) } - if mode == entities.InfraMode || (mode == entities.CreateMode) { // infra container flags, create should also pick these up + // Restart is allowed for created, updated, and infra ctr + if mode == entities.InfraMode || mode == entities.CreateMode || mode == entities.UpdateMode { restartFlagName := "restart" createFlags.StringVar( &cf.Restart, @@ -644,7 +650,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, `Restart policy to apply when a container exits ("always"|"no"|"never"|"on-failure"|"unless-stopped")`, ) _ = cmd.RegisterFlagCompletionFunc(restartFlagName, AutocompleteRestartOption) - + } + if mode == entities.InfraMode || (mode == entities.CreateMode) { // infra container flags, create should also pick these up shmSizeFlagName := "shm-size" createFlags.String( shmSizeFlagName, shmSize(), @@ -714,7 +721,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, usernsFlagName := "userns" createFlags.String( - usernsFlagName, os.Getenv("PODMAN_USERNS"), + usernsFlagName, "", "User namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace) diff --git a/cmd/podman/compose.go b/cmd/podman/compose.go index ddbce2ac13..cd764e5c61 100644 --- a/cmd/podman/compose.go +++ b/cmd/podman/compose.go @@ -1,5 +1,3 @@ -//go:build amd64 || arm64 - package main import ( @@ -16,10 +14,6 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/pkg/errorhandling" - "github.com/containers/podman/v5/pkg/machine" - "github.com/containers/podman/v5/pkg/machine/define" - "github.com/containers/podman/v5/pkg/machine/provider" - "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -114,10 +108,8 @@ func composeDockerHost() (string, error) { return registry.DefaultAPIAddress(), nil } - // TODO need to add support for --connection and --url - connection, err := registry.PodmanConfig().ContainersConfDefaultsRO.GetConnection("", true) - if err != nil { - logrus.Info(err) + conf := registry.PodmanConfig() + if conf.URI == "" { switch runtime.GOOS { // If no default connection is set on Linux or FreeBSD, // we just use the local socket by default - just as @@ -132,71 +124,27 @@ func composeDockerHost() (string, error) { } } - parsedConnection, err := url.Parse(connection.URI) + parsedConnection, err := url.Parse(conf.URI) if err != nil { return "", fmt.Errorf("preparing connection to remote machine: %w", err) } // If the default connection does not point to a `podman // machine`, we cannot use a local path and need to use SSH. - if !connection.IsMachine { - // Compose doesn't like paths, so we optimistically + if !conf.MachineMode { + // Docker Compose v1 doesn't like paths for ssh, so we optimistically // assume the presence of a Docker socket on the remote // machine which is the case for podman machines. 
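For the TrimSuffix call just below: a machine connection URI carries the remote socket path, while compose only needs the scheme, user, and host:port. A small standalone illustration, not part of the patch, with an invented URI:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Invented example of a podman-machine ssh connection URI.
	uri := "ssh://core@localhost:34567/run/user/1000/podman/podman.sock"
	parsed, err := url.Parse(uri)
	if err != nil {
		panic(err)
	}
	// Stripping the path leaves a DOCKER_HOST-style value:
	// ssh://core@localhost:34567
	fmt.Println(strings.TrimSuffix(uri, parsed.Path))
}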
- return strings.TrimSuffix(connection.URI, parsedConnection.Path), nil - } - - machineProvider, err := provider.Get() - if err != nil { - return "", fmt.Errorf("getting machine provider: %w", err) - } - dirs, err := machine.GetMachineDirs(machineProvider.VMType()) - if err != nil { - return "", err - } - - machineList, err := vmconfigs.LoadMachinesInDir(dirs) - if err != nil { - return "", fmt.Errorf("listing machines: %w", err) + if parsedConnection.Scheme == "ssh" { + return strings.TrimSuffix(conf.URI, parsedConnection.Path), nil + } + return conf.URI, nil } - - // Now we know that the connection points to a machine and we - // can find the machine by looking for the one with the - // matching port. - connectionPort, err := strconv.Atoi(parsedConnection.Port()) + uri, err := getMachineConn(conf.URI, parsedConnection) if err != nil { - return "", fmt.Errorf("parsing connection port: %w", err) - } - for _, item := range machineList { - if connectionPort != item.SSH.Port { - continue - } - - state, err := machineProvider.State(item, false) - if err != nil { - return "", err - } - - if state != define.Running { - return "", fmt.Errorf("machine %s is not running but in state %s", item.Name, state) - } - - // TODO This needs to be wired back in when all providers are complete - // TODO Need someoone to plumb in the connection information below - // if machineProvider.VMType() == define.WSLVirt || machineProvider.VMType() == define.HyperVVirt { - // if info.ConnectionInfo.PodmanPipe == nil { - // return "", errors.New("pipe of machine is not set") - // } - // return strings.Replace(info.ConnectionInfo.PodmanPipe.Path, `\\.\pipe\`, "npipe:////./pipe/", 1), nil - // } - // if info.ConnectionInfo.PodmanSocket == nil { - // return "", errors.New("socket of machine is not set") - // } - // return "unix://" + info.ConnectionInfo.PodmanSocket.Path, nil - return "", nil + return "", fmt.Errorf("get machine connection URI: %w", err) } - - return "", fmt.Errorf("could not find a matching machine for connection %q", connection.URI) + return uri, nil } // composeEnv returns the compose-specific environment variables. diff --git a/cmd/podman/compose_machine.go b/cmd/podman/compose_machine.go new file mode 100644 index 0000000000..7d70cc84c9 --- /dev/null +++ b/cmd/podman/compose_machine.go @@ -0,0 +1,70 @@ +//go:build amd64 || arm64 + +package main + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/podman/v5/pkg/machine/provider" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" +) + +func getMachineConn(connectionURI string, parsedConnection *url.URL) (string, error) { + machineProvider, err := provider.Get() + if err != nil { + return "", fmt.Errorf("getting machine provider: %w", err) + } + dirs, err := env.GetMachineDirs(machineProvider.VMType()) + if err != nil { + return "", err + } + + machineList, err := vmconfigs.LoadMachinesInDir(dirs) + if err != nil { + return "", fmt.Errorf("listing machines: %w", err) + } + + // Now we know that the connection points to a machine and we + // can find the machine by looking for the one with the + // matching port. 
+ connectionPort, err := strconv.Atoi(parsedConnection.Port()) + if err != nil { + return "", fmt.Errorf("parsing connection port: %w", err) + } + for _, mc := range machineList { + if connectionPort != mc.SSH.Port { + continue + } + + state, err := machineProvider.State(mc, false) + if err != nil { + return "", err + } + + if state != define.Running { + return "", fmt.Errorf("machine %s is not running but in state %s", mc.Name, state) + } + + podmanSocket, podmanPipe, err := mc.ConnectionInfo(machineProvider.VMType()) + if err != nil { + return "", err + } + if machineProvider.VMType() == define.WSLVirt || machineProvider.VMType() == define.HyperVVirt { + if podmanPipe == nil { + return "", errors.New("pipe of machine is not set") + } + return strings.Replace(podmanPipe.Path, `\\.\pipe\`, "npipe:////./pipe/", 1), nil + } + if podmanSocket == nil { + return "", errors.New("socket of machine is not set") + } + return "unix://" + podmanSocket.Path, nil + } + return "", fmt.Errorf("could not find a matching machine for connection %q", connectionURI) +} diff --git a/cmd/podman/compose_machine_unsupported.go b/cmd/podman/compose_machine_unsupported.go new file mode 100644 index 0000000000..f1ec7f67bd --- /dev/null +++ b/cmd/podman/compose_machine_unsupported.go @@ -0,0 +1,12 @@ +//go:build !(amd64 || arm64) + +package main + +import ( + "errors" + "net/url" +) + +func getMachineConn(connection string, parsedConnection *url.URL) (string, error) { + return "", errors.New("podman machine not supported on this architecture") +} diff --git a/cmd/podman/containers/clone.go b/cmd/podman/containers/clone.go index a34f121c1c..cff3f10c70 100644 --- a/cmd/podman/containers/clone.go +++ b/cmd/podman/containers/clone.go @@ -63,7 +63,7 @@ func clone(cmd *cobra.Command, args []string) error { ctrClone.Image = args[2] if !cliVals.RootFS { rawImageName := args[0] - name, err := PullImage(ctrClone.Image, &ctrClone.CreateOpts) + name, err := pullImage(cmd, ctrClone.Image, &ctrClone.CreateOpts) if err != nil { return err } diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go index 49f2e98399..5edeb2afac 100644 --- a/cmd/podman/containers/create.go +++ b/cmd/podman/containers/create.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "slices" "strconv" "strings" @@ -23,7 +24,6 @@ import ( "github.com/containers/podman/v5/pkg/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "golang.org/x/exp/slices" "golang.org/x/term" ) @@ -142,7 +142,7 @@ func create(cmd *cobra.Command, args []string) error { rawImageName := "" if !cliVals.RootFS { rawImageName = args[0] - name, err := PullImage(args[0], &cliVals) + name, err := pullImage(cmd, args[0], &cliVals) if err != nil { return err } @@ -189,7 +189,7 @@ func create(cmd *cobra.Command, args []string) error { } } - if cliVals.LogDriver != define.PassthroughLogging { + if cliVals.LogDriver != define.PassthroughLogging && cliVals.LogDriver != define.PassthroughTTYLogging { fmt.Println(report.Id) } return nil @@ -239,12 +239,17 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra if cliVals.LogDriver == define.PassthroughLogging { if term.IsTerminal(0) || term.IsTerminal(1) || term.IsTerminal(2) { - return vals, errors.New("the '--log-driver passthrough' option cannot be used on a TTY") + return vals, errors.New("the '--log-driver passthrough' option cannot be used on a TTY. 
If you really want it, use '--log-driver passthrough-tty'") } if registry.IsRemote() { return vals, errors.New("the '--log-driver passthrough' option is not supported in remote mode") } } + if cliVals.LogDriver == define.PassthroughTTYLogging { + if registry.IsRemote() { + return vals, errors.New("the '--log-driver passthrough-tty' option is not supported in remote mode") + } + } if !isInfra { if c.Flag("cpu-period").Changed && c.Flag("cpus").Changed { @@ -331,7 +336,7 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra } // Pulls image if any also parses and populates OS, Arch and Variant in specified container create options -func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (string, error) { +func pullImage(cmd *cobra.Command, imageName string, cliVals *entities.ContainerCreateOptions) (string, error) { pullPolicy, err := config.ParsePullPolicy(cliVals.Pull) if err != nil { return "", err @@ -360,7 +365,7 @@ func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (stri return "unable to obtain decryption config", err } - pullReport, pullErr := registry.ImageEngine().Pull(registry.GetContext(), imageName, entities.ImagePullOptions{ + pullOptions := entities.ImagePullOptions{ Authfile: cliVals.Authfile, Quiet: cliVals.Quiet, Arch: cliVals.Arch, @@ -370,7 +375,27 @@ func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (stri PullPolicy: pullPolicy, SkipTLSVerify: skipTLSVerify, OciDecryptConfig: decConfig, - }) + } + + if cmd.Flags().Changed("retry") { + retry, err := cmd.Flags().GetUint("retry") + if err != nil { + return "", err + } + + pullOptions.Retry = &retry + } + + if cmd.Flags().Changed("retry-delay") { + val, err := cmd.Flags().GetString("retry-delay") + if err != nil { + return "", err + } + + pullOptions.RetryDelay = val + } + + pullReport, pullErr := registry.ImageEngine().Pull(registry.GetContext(), imageName, pullOptions) if pullErr != nil { return "", pullErr } diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go index bf82cf9de5..ed2968ebbf 100644 --- a/cmd/podman/containers/ps.go +++ b/cmd/podman/containers/ps.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "slices" "strconv" "strings" "time" @@ -160,7 +161,7 @@ func jsonOut(responses []entities.ListContainer) error { return nil } -func quietOut(responses []entities.ListContainer) error { +func quietOut(responses []entities.ListContainer) { for _, r := range responses { id := r.ID if !noTrunc { @@ -168,7 +169,6 @@ func quietOut(responses []entities.ListContainer) error { } fmt.Println(id) } - return nil } func getResponses() ([]entities.ListContainer, error) { @@ -216,7 +216,8 @@ func ps(cmd *cobra.Command, _ []string) error { case report.IsJSON(listOpts.Format): return jsonOut(listContainers) case listOpts.Quiet && !cmd.Flags().Changed("format"): - return quietOut(listContainers) + quietOut(listContainers) + return nil } responses := make([]psReporter, 0, len(listContainers)) @@ -434,10 +435,7 @@ func (l psReporter) Networks() string { // Ports converts from Portmappings to the string form // required by ps func (l psReporter) Ports() string { - if len(l.ListContainer.Ports) < 1 { - return "" - } - return portsToString(l.ListContainer.Ports) + return portsToString(l.ListContainer.Ports, l.ListContainer.ExposedPorts) } // CreatedAt returns the container creation time in string format. 
podman @@ -446,7 +444,7 @@ func (l psReporter) CreatedAt() string { return l.Created.String() } -// CreateHuman allows us to output the created time in human readable format +// CreatedHuman allows us to output the created time in human readable format func (l psReporter) CreatedHuman() string { return units.HumanDuration(time.Since(l.Created)) + " ago" } @@ -489,8 +487,8 @@ func (l psReporter) UTS() string { // portsToString converts the ports used to a string of the from "port1, port2" // and also groups a continuous list of ports into a readable format. // The format is IP:HostPort(-Range)->ContainerPort(-Range)/Proto -func portsToString(ports []types.PortMapping) string { - if len(ports) == 0 { +func portsToString(ports []types.PortMapping, exposedPorts map[uint16][]string) string { + if len(ports) == 0 && len(exposedPorts) == 0 { return "" } sb := &strings.Builder{} @@ -512,6 +510,20 @@ func portsToString(ports []types.PortMapping) string { } } } + + // iterating a map is not deterministic so let's convert slice first and sort by port to make it deterministic + sortedPorts := make([]uint16, 0, len(exposedPorts)) + for port := range exposedPorts { + sortedPorts = append(sortedPorts, port) + } + slices.Sort(sortedPorts) + for _, port := range sortedPorts { + for _, protocol := range exposedPorts[port] { + // exposed ports do not have a host part and are just written as "NUM/PROTO" + fmt.Fprintf(sb, "%d/%s, ", port, protocol) + } + } + display := sb.String() // make sure to trim the last ", " of the string return display[:len(display)-2] diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go index 45b4b4ec58..0efac2a281 100644 --- a/cmd/podman/containers/rm.go +++ b/cmd/podman/containers/rm.go @@ -132,6 +132,9 @@ func rm(cmd *cobra.Command, args []string) error { logrus.Debug("--all is set: enforcing --depend=true") rmOptions.Depend = true } + if rmOptions.Force { + rmOptions.Ignore = true + } return removeContainers(utils.RemoveSlash(args), rmOptions, true, false) } @@ -144,9 +147,6 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit var errs utils.OutputErrors responses, err := registry.ContainerEngine().ContainerRm(context.Background(), namesOrIDs, rmOptions) if err != nil { - if rmOptions.Force && strings.Contains(err.Error(), define.ErrNoSuchCtr.Error()) { - return nil - } if setExit { setExitCode(err) } @@ -158,9 +158,6 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit if errors.Is(r.Err, define.ErrWillDeadlock) { logrus.Errorf("Potential deadlock detected - please run 'podman system renumber' to resolve") } - if rmOptions.Force && strings.Contains(r.Err.Error(), define.ErrNoSuchCtr.Error()) { - continue - } if setExit { setExitCode(r.Err) } diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go index 047dabb9c8..46c4a3a66d 100644 --- a/cmd/podman/containers/run.go +++ b/cmd/podman/containers/run.go @@ -150,7 +150,7 @@ func run(cmd *cobra.Command, args []string) error { rawImageName := "" if !cliVals.RootFS { rawImageName = args[0] - name, err := PullImage(args[0], &cliVals) + name, err := pullImage(cmd, args[0], &cliVals) if err != nil { return err } @@ -173,7 +173,7 @@ func run(cmd *cobra.Command, args []string) error { runOpts.InputStream = nil } - passthrough := cliVals.LogDriver == define.PassthroughLogging + passthrough := cliVals.LogDriver == define.PassthroughLogging || cliVals.LogDriver == define.PassthroughTTYLogging // If attach is set, clear stdin/stdout/stderr and only 
attach requested if cmd.Flag("attach").Changed { diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go index da6e24cccf..dd275e6d9e 100644 --- a/cmd/podman/containers/stats.go +++ b/cmd/podman/containers/stats.go @@ -15,7 +15,6 @@ import ( "github.com/containers/podman/v5/cmd/podman/validate" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" - "github.com/containers/podman/v5/utils" "github.com/docker/go-units" "github.com/spf13/cobra" ) @@ -243,12 +242,7 @@ func (s *containerStats) MemUsageBytes() string { } func floatToPercentString(f float64) string { - strippedFloat, err := utils.RemoveScientificNotationFromFloat(f) - if err != nil { - // If things go bazinga, return a safe value - return "--" - } - return fmt.Sprintf("%.2f", strippedFloat) + "%" + return fmt.Sprintf("%.2f%%", f) } func combineHumanValues(a, b uint64) string { diff --git a/cmd/podman/containers/update.go b/cmd/podman/containers/update.go index bcaa735a9c..9e8e28070a 100644 --- a/cmd/podman/containers/update.go +++ b/cmd/podman/containers/update.go @@ -7,9 +7,11 @@ import ( "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/registry" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/spf13/cobra" ) @@ -70,6 +72,17 @@ func update(cmd *cobra.Command, args []string) error { return err } + if updateOpts.Restart != "" { + policy, retries, err := util.ParseRestartPolicy(updateOpts.Restart) + if err != nil { + return err + } + s.RestartPolicy = policy + if policy == define.RestartPolicyOnFailure { + s.RestartRetries = &retries + } + } + // we need to pass the whole specgen since throttle devices are parsed later due to cross compat. 
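On the exposed-port handling added to portsToString in ps.go above: Go does not define an iteration order for maps, so the keys are copied into a slice and sorted before printing. The pattern in isolation, not part of the patch, with invented port data:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	exposed := map[uint16][]string{80: {"tcp"}, 53: {"tcp", "udp"}}

	// Sort the ports so the output is deterministic across runs.
	ports := make([]uint16, 0, len(exposed))
	for port := range exposed {
		ports = append(ports, port)
	}
	slices.Sort(ports)

	sb := &strings.Builder{}
	for _, port := range ports {
		for _, proto := range exposed[port] {
			fmt.Fprintf(sb, "%d/%s, ", port, proto)
		}
	}
	// Trim the trailing ", " just as the ps code does.
	out := sb.String()
	fmt.Println(out[:len(out)-2]) // 53/tcp, 53/udp, 80/tcp
}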
s.ResourceLimits, err = specgenutil.GetResources(s, &updateOpts) if err != nil { diff --git a/cmd/podman/diff/diff.go b/cmd/podman/diff/diff.go index 7fa47630a5..8fa9ca9880 100644 --- a/cmd/podman/diff/diff.go +++ b/cmd/podman/diff/diff.go @@ -73,6 +73,9 @@ func ValidateContainerDiffArgs(cmd *cobra.Command, args []string) error { return errors.New("--latest and containers cannot be used together") } if len(args) == 0 && !given { + if registry.IsRemote() { + return fmt.Errorf("%q requires a name or id", cmd.CommandPath()) + } return fmt.Errorf("%q requires a name, id, or the \"--latest\" flag", cmd.CommandPath()) } return nil diff --git a/cmd/podman/farm/create.go b/cmd/podman/farm/create.go index 6e7c493797..a0171c860e 100644 --- a/cmd/podman/farm/create.go +++ b/cmd/podman/farm/create.go @@ -2,13 +2,13 @@ package farm import ( "fmt" + "slices" "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/validate" "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) var ( diff --git a/cmd/podman/farm/update.go b/cmd/podman/farm/update.go index aecdf00aeb..66ced176c2 100644 --- a/cmd/podman/farm/update.go +++ b/cmd/podman/farm/update.go @@ -3,6 +3,7 @@ package farm import ( "errors" "fmt" + "slices" "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" @@ -10,7 +11,6 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/validate" "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) var ( diff --git a/cmd/podman/images/load.go b/cmd/podman/images/load.go index c535d78e3e..6e8237d1d8 100644 --- a/cmd/podman/images/load.go +++ b/cmd/podman/images/load.go @@ -14,6 +14,7 @@ import ( "github.com/containers/podman/v5/cmd/podman/validate" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" "golang.org/x/term" ) @@ -85,7 +86,7 @@ func load(cmd *cobra.Command, args []string) error { loadOpts.Input = tmpfile } - if _, err := os.Stat(loadOpts.Input); err != nil { + if err := fileutils.Exists(loadOpts.Input); err != nil { return err } } else { diff --git a/cmd/podman/images/pull.go b/cmd/podman/images/pull.go index 47ab528b6f..d9669db874 100644 --- a/cmd/podman/images/pull.go +++ b/cmd/podman/images/pull.go @@ -114,23 +114,22 @@ func pullFlags(cmd *cobra.Command) { _ = cmd.RegisterFlagCompletionFunc(decryptionKeysFlagName, completion.AutocompleteDefault) retryFlagName := "retry" - flags.Uint(retryFlagName, cli.MaxPullPushRetries, "number of times to retry in case of failure when performing pull") + flags.Uint(retryFlagName, registry.RetryDefault(), "number of times to retry in case of failure when performing pull") _ = cmd.RegisterFlagCompletionFunc(retryFlagName, completion.AutocompleteNone) retryDelayFlagName := "retry-delay" - flags.String(retryDelayFlagName, cli.PullPushRetryDelay.String(), "delay between retries in case of pull failures") + flags.String(retryDelayFlagName, registry.RetryDelayDefault(), "delay between retries in case of pull failures") _ = cmd.RegisterFlagCompletionFunc(retryDelayFlagName, completion.AutocompleteNone) if registry.IsRemote() { _ = flags.MarkHidden(decryptionKeysFlagName) - } - if !registry.IsRemote() { + } else { certDirFlagName := "cert-dir" flags.StringVar(&pullOptions.CertDir, certDirFlagName, "", "`Pathname` of a directory 
containing TLS certificates and keys") _ = cmd.RegisterFlagCompletionFunc(certDirFlagName, completion.AutocompleteDefault) - } - if !registry.IsRemote() { - flags.StringVar(&pullOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)") - _ = flags.MarkHidden("signature-policy") + + signaturePolicyFlagName := "signature-policy" + flags.StringVar(&pullOptions.SignaturePolicy, signaturePolicyFlagName, "", "`Pathname` of signature policy file (not usually used)") + _ = flags.MarkHidden(signaturePolicyFlagName) } } diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go index f71e0fa7d5..a27e0756e3 100644 --- a/cmd/podman/images/push.go +++ b/cmd/podman/images/push.go @@ -111,6 +111,13 @@ func pushFlags(cmd *cobra.Command) { flags.BoolVarP(&pushOptions.Quiet, "quiet", "q", false, "Suppress output information when pushing images") flags.BoolVar(&pushOptions.RemoveSignatures, "remove-signatures", false, "Discard any pre-existing signatures in the image") + retryFlagName := "retry" + flags.Uint(retryFlagName, registry.RetryDefault(), "number of times to retry in case of failure when performing push") + _ = cmd.RegisterFlagCompletionFunc(retryFlagName, completion.AutocompleteNone) + retryDelayFlagName := "retry-delay" + flags.String(retryDelayFlagName, registry.RetryDelayDefault(), "delay between retries in case of push failures") + _ = cmd.RegisterFlagCompletionFunc(retryDelayFlagName, completion.AutocompleteNone) + signByFlagName := "sign-by" flags.StringVar(&pushOptions.SignBy, signByFlagName, "", "Add a signature at the destination using the specified key") _ = cmd.RegisterFlagCompletionFunc(signByFlagName, completion.AutocompleteNone) @@ -155,10 +162,10 @@ func pushFlags(cmd *cobra.Command) { _ = flags.MarkHidden(signPassphraseFileFlagName) _ = flags.MarkHidden(encryptionKeysFlagName) _ = flags.MarkHidden(encryptLayersFlagName) - } - if !registry.IsRemote() { - flags.StringVar(&pushOptions.SignaturePolicy, "signature-policy", "", "Path to a signature-policy file") - _ = flags.MarkHidden("signature-policy") + } else { + signaturePolicyFlagName := "signature-policy" + flags.StringVar(&pushOptions.SignaturePolicy, signaturePolicyFlagName, "", "Path to a signature-policy file") + _ = flags.MarkHidden(signaturePolicyFlagName) } } @@ -208,6 +215,24 @@ func imagePush(cmd *cobra.Command, args []string) error { pushOptions.OciEncryptConfig = encConfig pushOptions.OciEncryptLayers = encLayers + if cmd.Flags().Changed("retry") { + retry, err := cmd.Flags().GetUint("retry") + if err != nil { + return err + } + + pushOptions.Retry = &retry + } + + if cmd.Flags().Changed("retry-delay") { + val, err := cmd.Flags().GetString("retry-delay") + if err != nil { + return err + } + + pushOptions.RetryDelay = val + } + if cmd.Flags().Changed("compression-level") { val, err := cmd.Flags().GetInt("compression-level") if err != nil { @@ -232,7 +257,7 @@ func imagePush(cmd *cobra.Command, args []string) error { } if pushOptions.DigestFile != "" { - if err := os.WriteFile(pushOptions.DigestFile, []byte(report.ManifestDigest), 0644); err != nil { + if err := os.WriteFile(pushOptions.DigestFile, []byte(report.ManifestDigest), 0o644); err != nil { return err } } diff --git a/cmd/podman/images/rm.go b/cmd/podman/images/rm.go index b0409ce208..c03d5b0b72 100644 --- a/cmd/podman/images/rm.go +++ b/cmd/podman/images/rm.go @@ -3,14 +3,11 @@ package images import ( "errors" "fmt" - "strings" "github.com/containers/podman/v5/cmd/podman/common" 
"github.com/containers/podman/v5/cmd/podman/registry" - "github.com/containers/podman/v5/cmd/podman/utils" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/errorhandling" - "github.com/containers/storage/types" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -72,6 +69,10 @@ func rm(cmd *cobra.Command, args []string) error { return errors.New("when using the --all switch, you may not pass any images names or IDs") } + if imageOpts.Force { + imageOpts.Ignore = true + } + // Note: certain image-removal errors are non-fatal. Hence, the report // might be set even if err != nil. report, rmErrors := registry.ImageEngine().Remove(registry.GetContext(), args, imageOpts) @@ -85,19 +86,10 @@ func rm(cmd *cobra.Command, args []string) error { fmt.Println("Deleted: " + d) } } - for _, err := range rmErrors { - if !imageOpts.Force || !strings.Contains(err.Error(), types.ErrImageUnknown.Error()) { - registry.SetExitCode(report.ExitCode) - } - } } - - var errs utils.OutputErrors - for _, err := range rmErrors { - if imageOpts.Force && strings.Contains(err.Error(), types.ErrImageUnknown.Error()) { - continue - } - errs = append(errs, err) + if len(rmErrors) > 0 { + registry.SetExitCode(report.ExitCode) } - return errorhandling.JoinErrors(errs) + + return errorhandling.JoinErrors(rmErrors) } diff --git a/cmd/podman/images/save.go b/cmd/podman/images/save.go index 529b050530..9124fc43cf 100644 --- a/cmd/podman/images/save.go +++ b/cmd/podman/images/save.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "slices" "strings" "github.com/containers/common/pkg/completion" @@ -14,7 +15,6 @@ import ( "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/spf13/cobra" - "golang.org/x/exp/slices" "golang.org/x/term" ) diff --git a/cmd/podman/images/sign.go b/cmd/podman/images/sign.go index 08aa93dea1..f4353b5aec 100644 --- a/cmd/podman/images/sign.go +++ b/cmd/podman/images/sign.go @@ -2,13 +2,13 @@ package images import ( "errors" - "os" "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" ) @@ -68,7 +68,7 @@ func sign(cmd *cobra.Command, args []string) error { var sigStoreDir string if len(signOptions.Directory) > 0 { sigStoreDir = signOptions.Directory - if _, err := os.Stat(sigStoreDir); err != nil { + if err := fileutils.Exists(sigStoreDir); err != nil { return err } } diff --git a/cmd/podman/images/trust_set.go b/cmd/podman/images/trust_set.go index a7acf21122..67ae1d32e0 100644 --- a/cmd/podman/images/trust_set.go +++ b/cmd/podman/images/trust_set.go @@ -4,13 +4,13 @@ import ( "fmt" "net/url" "regexp" + "slices" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) var ( diff --git a/cmd/podman/kube/generate.go b/cmd/podman/kube/generate.go index 6bf0357620..1ded2f8592 100644 --- a/cmd/podman/kube/generate.go +++ b/cmd/podman/kube/generate.go @@ -11,6 +11,7 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/utils" "github.com/containers/podman/v5/pkg/domain/entities" + 
"github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" ) @@ -83,6 +84,7 @@ func generateFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) { noTruncAnnotationsFlagName := "no-trunc" flags.BoolVar(&generateOptions.UseLongAnnotations, noTruncAnnotationsFlagName, false, "Don't truncate annotations to Kubernetes length (63 chars)") + _ = flags.MarkHidden(noTruncAnnotationsFlagName) podmanOnlyFlagName := "podman-only" flags.BoolVar(&generateOptions.PodmanOnly, podmanOnlyFlagName, false, "Add podman-only reserved annotations to the generated YAML file (Cannot be used by Kubernetes)") @@ -104,7 +106,7 @@ func generateKube(cmd *cobra.Command, args []string) error { } if cmd.Flags().Changed("filename") { - if _, err := os.Stat(generateFile); err == nil { + if err := fileutils.Exists(generateFile); err == nil { return fmt.Errorf("cannot write to %q; file exists", generateFile) } if err := os.WriteFile(generateFile, content, 0644); err != nil { diff --git a/cmd/podman/kube/play.go b/cmd/podman/kube/play.go index caa78add38..eba54f0193 100644 --- a/cmd/podman/kube/play.go +++ b/cmd/podman/kube/play.go @@ -22,6 +22,7 @@ import ( "github.com/containers/podman/v5/cmd/podman/utils" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/annotations" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/util" @@ -133,7 +134,7 @@ func playFlags(cmd *cobra.Command) { _ = cmd.RegisterFlagCompletionFunc(logOptFlagName, common.AutocompleteLogOpt) usernsFlagName := "userns" - flags.StringVar(&playOptions.Userns, usernsFlagName, os.Getenv("PODMAN_USERNS"), + flags.StringVar(&playOptions.Userns, usernsFlagName, "", "User namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(usernsFlagName, common.AutocompleteUserNamespace) @@ -171,6 +172,7 @@ func playFlags(cmd *cobra.Command) { noTruncFlagName := "no-trunc" flags.BoolVar(&playOptions.UseLongAnnotations, noTruncFlagName, false, "Use annotations that are not truncated to the Kubernetes maximum length of 63 characters") + _ = flags.MarkHidden(noTruncFlagName) if !registry.IsRemote() { certDirFlagName := "cert-dir" @@ -253,12 +255,13 @@ func play(cmd *cobra.Command, args []string) error { if playOptions.Annotations == nil { playOptions.Annotations = make(map[string]string) } - if len(val) > define.MaxKubeAnnotation && !playOptions.UseLongAnnotations { - return fmt.Errorf("annotation exceeds maximum size, %d, of kubernetes annotation: %s", define.MaxKubeAnnotation, val) - } playOptions.Annotations[key] = val } + if err := annotations.ValidateAnnotations(playOptions.Annotations); err != nil { + return err + } + for _, mac := range playOptions.macs { m, err := net.ParseMAC(mac) if err != nil { diff --git a/cmd/podman/machine/client9p.go b/cmd/podman/machine/client9p.go index 00f90fad4a..1153ecbd00 100644 --- a/cmd/podman/machine/client9p.go +++ b/cmd/podman/machine/client9p.go @@ -8,6 +8,7 @@ import ( "os/exec" "path/filepath" "strconv" + "time" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/registry" @@ -69,8 +70,22 @@ func client9p(portNum uint32, mountPath string) error { logrus.Infof("Going to mount 9p on vsock port %d to directory %s", portNum, mountPath) - // Host connects to non-hypervisor processes on the host running the VM. - conn, err := vsock.Dial(vsock.Host, portNum, nil) + // The server is starting at the same time. 
+ // Perform up to 20 retries with a short, fixed delay between attempts.
+ var (
+ conn *vsock.Conn
+ retries = 20
+ )
+ for i := 0; i < retries; i++ {
+ // Host connects to non-hypervisor processes on the host running the VM.
+ conn, err = vsock.Dial(vsock.Host, portNum, nil)
+ // If errors.Is worked on this error, we could detect non-timeout errors.
+ // But it doesn't. So retry up to the full count regardless of the error.
+ if err == nil {
+ break
+ }
+ time.Sleep(250 * time.Millisecond)
+ }
if err != nil {
return fmt.Errorf("dialing vsock port %d: %w", portNum, err)
}
diff --git a/cmd/podman/machine/info.go b/cmd/podman/machine/info.go
index 8c3d5f727a..793bc4efe8 100644
--- a/cmd/podman/machine/info.go
+++ b/cmd/podman/machine/info.go
@@ -14,11 +14,11 @@ import (
"github.com/containers/podman/v5/cmd/podman/validate"
"github.com/containers/podman/v5/libpod/define"
"github.com/containers/podman/v5/pkg/domain/entities"
- "github.com/containers/podman/v5/pkg/machine"
machineDefine "github.com/containers/podman/v5/pkg/machine/define"
+ "github.com/containers/podman/v5/pkg/machine/env"
"github.com/containers/podman/v5/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)

var infoDescription = `Display information pertaining to the machine host.`
@@ -100,7 +100,7 @@ func hostInfo() (*entities.MachineHostInfo, error) {
host.Arch = runtime.GOARCH
host.OS = runtime.GOOS

- dirs, err := machine.GetMachineDirs(provider.VMType())
+ dirs, err := env.GetMachineDirs(provider.VMType())
if err != nil {
return nil, err
}
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 88f358d764..7759304544 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -7,6 +7,7 @@ import (
"os"

"github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/strongunits"
"github.com/containers/podman/v5/cmd/podman/registry"
ldefine "github.com/containers/podman/v5/libpod/define"
"github.com/containers/podman/v5/libpod/events"
@@ -14,6 +15,8 @@ import (
"github.com/containers/podman/v5/pkg/machine/define"
"github.com/containers/podman/v5/pkg/machine/shim"
"github.com/containers/podman/v5/pkg/machine/vmconfigs"
+ "github.com/shirou/gopsutil/v3/mem"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@@ -101,9 +104,17 @@ func init() {
flags.StringVar(&initOpts.Username, UsernameFlagName, cfg.ContainersConfDefaultsRO.Machine.User, "Username used in image")
_ = initCmd.RegisterFlagCompletionFunc(UsernameFlagName, completion.AutocompleteDefault)

+ ImageFlagName := "image"
+ flags.StringVar(&initOpts.Image, ImageFlagName, cfg.ContainersConfDefaultsRO.Machine.Image, "Bootable image for machine")
+ _ = initCmd.RegisterFlagCompletionFunc(ImageFlagName, completion.AutocompleteDefault)
+
+ // Deprecate image-path option, use --image instead
ImagePathFlagName := "image-path"
- flags.StringVar(&initOpts.ImagePath, ImagePathFlagName, "", "Path to bootable image")
+ flags.StringVar(&initOpts.Image, ImagePathFlagName, cfg.ContainersConfDefaultsRO.Machine.Image, "Bootable image for machine")
_ = initCmd.RegisterFlagCompletionFunc(ImagePathFlagName, completion.AutocompleteDefault)
+ if err := flags.MarkDeprecated(ImagePathFlagName, "use --image instead"); err != nil {
+ logrus.Error("unable to mark image-path flag deprecated")
+ }

VolumeFlagName := "volume"
flags.StringArrayVarP(&initOpts.Volumes, VolumeFlagName, "v", cfg.ContainersConfDefaultsRO.Machine.Volumes.Get(), "Volumes to mount, source:target")
@@ -187,6 +198,12 @@ func initMachine(cmd *cobra.Command, args []string) error {
initOpts.UserModeNetworking = &initOptionalFlags.UserModeNetworking } + if cmd.Flags().Changed("memory") { + if err := checkMaxMemory(strongunits.MiB(initOpts.Memory)); err != nil { + return err + } + } + // TODO need to work this back in // if finished, err := vm.Init(initOpts); err != nil || !finished { // // Finished = true, err = nil - Success! Log a message with further instructions @@ -199,16 +216,11 @@ func initMachine(cmd *cobra.Command, args []string) error { // return err // } - mc, err := shim.Init(initOpts, provider) + err = shim.Init(initOpts, provider) if err != nil { return err } - // TODO callback needed for the configuration file - if err := mc.Write(); err != nil { - return err - } - newMachineEvent(events.Init, events.Event{Name: initOpts.Name}) fmt.Println("Machine init complete") @@ -222,3 +234,16 @@ func initMachine(cmd *cobra.Command, args []string) error { fmt.Printf("To start your machine run:\n\n\tpodman machine start%s\n\n", extra) return err } + +// checkMaxMemory gets the total system memory and compares it to the variable. if the variable +// is larger than the total memory, it returns an error +func checkMaxMemory(newMem strongunits.MiB) error { + memStat, err := mem.VirtualMemory() + if err != nil { + return err + } + if total := strongunits.B(memStat.Total); strongunits.B(memStat.Total) < newMem.ToBytes() { + return fmt.Errorf("requested amount of memory (%d MB) greater than total system memory (%d MB)", newMem, total) + } + return nil +} diff --git a/cmd/podman/machine/inspect.go b/cmd/podman/machine/inspect.go index 81699268d0..c94781a172 100644 --- a/cmd/podman/machine/inspect.go +++ b/cmd/podman/machine/inspect.go @@ -10,6 +10,7 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/utils" "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/spf13/cobra" ) @@ -47,7 +48,7 @@ func inspect(cmd *cobra.Command, args []string) error { var ( errs utils.OutputErrors ) - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -67,31 +68,32 @@ func inspect(cmd *cobra.Command, args []string) error { if err != nil { return err } - ignFile, err := mc.IgnitionFile() + + podmanSocket, podmanPipe, err := mc.ConnectionInfo(provider.VMType()) + if err != nil { + return err + } + + rosetta, err := provider.GetRosetta(mc) if err != nil { return err } ii := machine.InspectInfo{ - // TODO I dont think this is useful - ConfigPath: *dirs.ConfigDir, - // TODO Fill this out - ConnectionInfo: machine.ConnectionConfig{}, - Created: mc.Created, - // TODO This is no longer applicable; we dont care about the provenance - // of the image - Image: machine.ImageConfig{ - IgnitionFile: *ignFile, - ImagePath: *mc.ImagePath, + ConfigDir: *dirs.ConfigDir, + ConnectionInfo: machine.ConnectionConfig{ + PodmanSocket: podmanSocket, + PodmanPipe: podmanPipe, }, + Created: mc.Created, LastUp: mc.LastUp, Name: mc.Name, Resources: mc.Resources, SSHConfig: mc.SSH, State: state, UserModeNetworking: provider.UserModeNetworkEnabled(mc), - // TODO I think this should be the HostUser - Rootful: mc.HostUser.Rootful, + Rootful: mc.HostUser.Rootful, + Rosetta: rosetta, } vms = append(vms, ii) diff --git a/cmd/podman/machine/list.go b/cmd/podman/machine/list.go index 4ab2260b2c..04ec6eec22 100644 --- a/cmd/podman/machine/list.go +++ b/cmd/podman/machine/list.go 
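The list.go hunk that follows replaces raw integers with strongunits values for Memory and DiskSize and converts them to bytes for display. Assuming memory is tracked in MiB, as the checkMaxMemory call above wraps it, the conversion looks like this; the snippet is not part of the patch and the 2048 is arbitrary:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/strongunits"
)

func main() {
	// 2048 MiB expressed in bytes, the form the machine list output uses.
	mem := strongunits.MiB(2048)
	fmt.Println(uint64(mem.ToBytes())) // 2147483648
}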
@@ -164,8 +164,8 @@ func toMachineFormat(vms []*machine.ListResponse, defaultCon string) []*entities response.Stream = streamName(vm.Stream) response.VMType = vm.VMType response.CPUs = vm.CPUs - response.Memory = strUint(vm.Memory) - response.DiskSize = strUint(vm.DiskSize) + response.Memory = strUint(uint64(vm.Memory.ToBytes())) + response.DiskSize = strUint(uint64(vm.DiskSize.ToBytes())) response.Port = vm.Port response.RemoteUsername = vm.RemoteUsername response.IdentityPath = vm.IdentityPath @@ -202,8 +202,8 @@ func toHumanFormat(vms []*machine.ListResponse, defaultCon string) []*entities.L response.Created = units.HumanDuration(time.Since(vm.CreatedAt)) + " ago" response.VMType = vm.VMType response.CPUs = vm.CPUs - response.Memory = units.BytesSize(float64(vm.Memory)) - response.DiskSize = units.BytesSize(float64(vm.DiskSize)) + response.Memory = units.BytesSize(float64(vm.Memory.ToBytes())) + response.DiskSize = units.BytesSize(float64(vm.DiskSize.ToBytes())) humanResponses = append(humanResponses, response) } diff --git a/cmd/podman/machine/machine.go b/cmd/podman/machine/machine.go index dc9b7b9f6b..1dd65c1ec8 100644 --- a/cmd/podman/machine/machine.go +++ b/cmd/podman/machine/machine.go @@ -15,7 +15,7 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/validate" "github.com/containers/podman/v5/libpod/events" - "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" provider2 "github.com/containers/podman/v5/pkg/machine/provider" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/pkg/util" @@ -82,7 +82,7 @@ func getMachines(toComplete string) ([]string, cobra.ShellCompDirective) { if err != nil { return nil, cobra.ShellCompDirectiveNoFileComp } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return nil, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/podman/machine/os/manager.go b/cmd/podman/machine/os/manager.go index 0d402cd5c9..1e028be238 100644 --- a/cmd/podman/machine/os/manager.go +++ b/cmd/podman/machine/os/manager.go @@ -10,6 +10,7 @@ import ( machineconfig "github.com/containers/common/pkg/machine" pkgMachine "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" pkgOS "github.com/containers/podman/v5/pkg/machine/os" "github.com/containers/podman/v5/pkg/machine/provider" "github.com/containers/podman/v5/pkg/machine/vmconfigs" @@ -52,7 +53,7 @@ func machineOSManager(opts ManagerOpts, _ vmconfigs.VMProvider) (pkgOS.Manager, if err != nil { return nil, err } - dirs, err := pkgMachine.GetMachineDirs(p.VMType()) + dirs, err := env.GetMachineDirs(p.VMType()) if err != nil { return nil, err } diff --git a/cmd/podman/machine/reset.go b/cmd/podman/machine/reset.go index 920b553a1f..9b76f97a86 100644 --- a/cmd/podman/machine/reset.go +++ b/cmd/podman/machine/reset.go @@ -7,13 +7,14 @@ import ( "fmt" "os" "strings" + "text/tabwriter" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/validate" "github.com/containers/podman/v5/pkg/machine" + provider2 "github.com/containers/podman/v5/pkg/machine/provider" "github.com/containers/podman/v5/pkg/machine/shim" - "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/spf13/cobra" ) @@ -22,7 +23,6 @@ var ( Use: "reset [options]", Short: "Remove all machines", Long: "Remove all 
machines, configurations, data, and cached images", - PersistentPreRunE: machinePreRunE, RunE: reset, Args: validate.NoArgs, Example: `podman machine reset`, @@ -50,21 +50,19 @@ func reset(_ *cobra.Command, _ []string) error { err error ) - dirs, err := machine.GetMachineDirs(provider.VMType()) - if err != nil { - return err - } - - // TODO we could consider saying we get a list of vms but can proceed - // to just delete all local disk dirs, etc. Maybe a --proceed? - mcs, err := vmconfigs.LoadMachinesInDir(dirs) + providers, err := provider2.GetAll(resetOptions.Force) if err != nil { return err } if !resetOptions.Force { - vms := vmNamesFromMcs(mcs) - resetConfirmationMessage(vms) + listResponse, err := shim.List(providers, machine.ListOptions{}) + if err != nil { + return err + } + + resetConfirmationMessage(listResponse) + reader := bufio.NewReader(os.Stdin) fmt.Print("\nAre you sure you want to continue? [y/N] ") answer, err := reader.ReadString('\n') @@ -75,24 +73,18 @@ func reset(_ *cobra.Command, _ []string) error { return nil } } - - // resetErr can be nil or a multi-error - return shim.Reset(dirs, provider, mcs) + return shim.Reset(providers, resetOptions) } -func resetConfirmationMessage(vms []string) { +func resetConfirmationMessage(listResponse []*machine.ListResponse) { fmt.Println("Warning: this command will delete all existing Podman machines") fmt.Println("and all of the configuration and data directories for Podman machines") fmt.Printf("\nThe following machine(s) will be deleted:\n\n") - for _, msg := range vms { - fmt.Printf("%s\n", msg) - } -} + w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) + fmt.Fprintln(w, "NAME\tPROVIDER") -func vmNamesFromMcs(mcs map[string]*vmconfigs.MachineConfig) []string { - keys := make([]string, 0, len(mcs)) - for k := range mcs { - keys = append(keys, k) + for _, m := range listResponse { + fmt.Fprintf(w, "%s\t%s\n", m.Name, m.VMType) } - return keys + w.Flush() } diff --git a/cmd/podman/machine/rm.go b/cmd/podman/machine/rm.go index dff2cd86ec..5b76e1c19c 100644 --- a/cmd/podman/machine/rm.go +++ b/cmd/podman/machine/rm.go @@ -3,18 +3,12 @@ package machine import ( - "bufio" - "fmt" - "os" - "strings" - "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/libpod/events" "github.com/containers/podman/v5/pkg/machine" - "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -61,7 +55,7 @@ func rm(_ *cobra.Command, args []string) error { vmName = args[0] } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -71,71 +65,9 @@ func rm(_ *cobra.Command, args []string) error { return err } - state, err := provider.State(mc, false) - if err != nil { - return err - } - - if state == define.Running { - if !destroyOptions.Force { - return &define.ErrVMRunningCannotDestroyed{Name: vmName} - } - if err := shim.Stop(mc, provider, dirs, true); err != nil { - return err - } - } - - rmFiles, genericRm, err := mc.Remove(destroyOptions.SaveIgnition, destroyOptions.SaveImage) - if err != nil { - return err - } - - providerFiles, providerRm, err := provider.Remove(mc) - if err != nil { + if err := shim.Remove(mc, provider, dirs, destroyOptions); err != nil { return err } - - // Add provider specific files to the list - 
rmFiles = append(rmFiles, providerFiles...) - - // Important! - // Nothing can be removed at this point. The user can still opt out below - // - - if !destroyOptions.Force { - // Warn user - confirmationMessage(rmFiles) - reader := bufio.NewReader(os.Stdin) - fmt.Print("Are you sure you want to continue? [y/N] ") - answer, err := reader.ReadString('\n') - if err != nil { - return err - } - if strings.ToLower(answer)[0] != 'y' { - return nil - } - } - - // - // All actual removal of files and vms should occur after this - // - - // TODO Should this be a hard error? - if err := providerRm(); err != nil { - logrus.Errorf("failed to remove virtual machine from provider for %q", vmName) - } - - // TODO Should this be a hard error? - if err := genericRm(); err != nil { - logrus.Error("failed to remove machines files") - } newMachineEvent(events.Remove, events.Event{Name: vmName}) return nil } - -func confirmationMessage(files []string) { - fmt.Printf("The following files will be deleted:\n\n\n") - for _, msg := range files { - fmt.Println(msg) - } -} diff --git a/cmd/podman/machine/set.go b/cmd/podman/machine/set.go index bf6a21c66d..07736bd507 100644 --- a/cmd/podman/machine/set.go +++ b/cmd/podman/machine/set.go @@ -3,13 +3,12 @@ package machine import ( - "fmt" - "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/strongunits" "github.com/containers/podman/v5/cmd/podman/registry" - "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/spf13/cobra" ) @@ -89,16 +88,12 @@ func init() { } func setMachine(cmd *cobra.Command, args []string) error { - var ( - err error - ) - vmName := defaultMachineName if len(args) > 0 && len(args[0]) > 0 { vmName = args[0] } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -112,18 +107,16 @@ func setMachine(cmd *cobra.Command, args []string) error { setOpts.Rootful = &setFlags.Rootful } if cmd.Flags().Changed("cpus") { - mc.Resources.CPUs = setFlags.CPUs - setOpts.CPUs = &mc.Resources.CPUs + setOpts.CPUs = &setFlags.CPUs } if cmd.Flags().Changed("memory") { - mc.Resources.Memory = setFlags.Memory - setOpts.Memory = &mc.Resources.Memory + newMemory := strongunits.MiB(setFlags.Memory) + if err := checkMaxMemory(newMemory); err != nil { + return err + } + setOpts.Memory = &newMemory } if cmd.Flags().Changed("disk-size") { - if setFlags.DiskSize <= mc.Resources.DiskSize { - return fmt.Errorf("new disk size must be larger than %d GB", mc.Resources.DiskSize) - } - mc.Resources.DiskSize = setFlags.DiskSize newDiskSizeGB := strongunits.GiB(setFlags.DiskSize) setOpts.DiskSize = &newDiskSizeGB } @@ -136,10 +129,5 @@ func setMachine(cmd *cobra.Command, args []string) error { // At this point, we have the known changed information, etc // Walk through changes to the providers if they need them - if err := provider.SetProviderAttrs(mc, setOpts); err != nil { - return err - } - - // Update the configuration file last if everything earlier worked - return mc.Write() + return shim.Set(mc, provider, setOpts) } diff --git a/cmd/podman/machine/ssh.go b/cmd/podman/machine/ssh.go index 73bed582c4..94bc7449b8 100644 --- a/cmd/podman/machine/ssh.go +++ b/cmd/podman/machine/ssh.go @@ -7,6 +7,7 @@ import ( "net/url" 
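The set.go hunk above now routes --memory through strongunits.MiB and --disk-size through strongunits.GiB before calling shim.Set, and the list formatting earlier converts back to plain byte counts with ToBytes(). A rough sketch of those conversions, assuming the same MiB/GiB interpretation and using arbitrary example values:

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/strongunits"
)

func main() {
	// Example CLI values: --memory treated as MiB, --disk-size as GiB,
	// matching the conversions in setMachine above.
	memory := strongunits.MiB(2048)
	disk := strongunits.GiB(100)

	// ToBytes yields a byte count; the list output above turns these back
	// into plain integers with uint64(...).
	fmt.Println(uint64(memory.ToBytes()), "bytes of memory") // 2147483648 bytes of memory
	fmt.Println(uint64(disk.ToBytes()), "bytes of disk")     // 107374182400 bytes of disk
}
```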
"github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/registry" @@ -54,7 +55,7 @@ func ssh(cmd *cobra.Command, args []string) error { validVM bool ) - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -119,7 +120,7 @@ func ssh(cmd *cobra.Command, args []string) error { username = mc.SSH.RemoteUsername } - err = machine.CommonSSH(username, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, sshOpts.Args) + err = machine.CommonSSHShell(username, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, sshOpts.Args) return utils.HandleOSExecError(err) } diff --git a/cmd/podman/machine/start.go b/cmd/podman/machine/start.go index fb6c822b73..6a48eef14c 100644 --- a/cmd/podman/machine/start.go +++ b/cmd/podman/machine/start.go @@ -8,10 +8,9 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/libpod/events" "github.com/containers/podman/v5/pkg/machine" - "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -55,7 +54,7 @@ func start(_ *cobra.Command, args []string) error { vmName = args[0] } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -64,36 +63,10 @@ func start(_ *cobra.Command, args []string) error { return err } - state, err := provider.State(mc, false) - if err != nil { - return err - } - - if state == define.Running { - return define.ErrVMAlreadyRunning - } - - if err := shim.CheckExclusiveActiveVM(provider, mc); err != nil { - return err - } - if !startOpts.Quiet { fmt.Printf("Starting machine %q\n", vmName) } - // Set starting to true - mc.Starting = true - if err := mc.Write(); err != nil { - logrus.Error(err) - } - - // Set starting to false on exit - defer func() { - mc.Starting = false - if err := mc.Write(); err != nil { - logrus.Error(err) - } - }() if err := shim.Start(mc, provider, dirs, startOpts); err != nil { return err } diff --git a/cmd/podman/machine/stop.go b/cmd/podman/machine/stop.go index 5a1877957a..de7fc16e2d 100644 --- a/cmd/podman/machine/stop.go +++ b/cmd/podman/machine/stop.go @@ -4,14 +4,12 @@ package machine import ( "fmt" - "time" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/libpod/events" - "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -46,7 +44,7 @@ func stop(cmd *cobra.Command, args []string) error { vmName = args[0] } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } @@ -59,12 +57,6 @@ func stop(cmd *cobra.Command, args []string) error { return err } - // Update last time up - mc.LastUp = time.Now() - if err := mc.Write(); err != nil { - logrus.Errorf("unable to write configuration file: %q", err) - } - fmt.Printf("Machine %q stopped successfully\n", vmName) newMachineEvent(events.Stop, events.Event{Name: vmName}) return nil diff --git 
a/cmd/podman/main.go b/cmd/podman/main.go index 6a7ef36ae8..dd5b984d8d 100644 --- a/cmd/podman/main.go +++ b/cmd/podman/main.go @@ -43,12 +43,13 @@ func main() { if filepath.Base(os.Args[0]) == registry.PodmanSh || (len(os.Args[0]) > 0 && filepath.Base(os.Args[0][1:]) == registry.PodmanSh) { shell := strings.TrimPrefix(os.Args[0], "-") + cfg := registry.PodmanConfig() - args := []string{shell, "exec", "-i", "--wait", strconv.FormatUint(uint64(registry.PodmanConfig().ContainersConfDefaultsRO.Engine.PodmanshTimeout), 10)} + args := []string{shell, "exec", "-i", "--wait", strconv.FormatUint(uint64(cfg.ContainersConfDefaultsRO.PodmanshTimeout()), 10)} if term.IsTerminal(0) || term.IsTerminal(1) || term.IsTerminal(2) { args = append(args, "-t") } - args = append(args, registry.PodmanSh, "/bin/sh") + args = append(args, cfg.ContainersConfDefaultsRO.Podmansh.Container, cfg.ContainersConfDefaultsRO.Podmansh.Shell) if len(os.Args) > 1 { args = append(args, os.Args[1:]...) } diff --git a/cmd/podman/manifest/add.go b/cmd/podman/manifest/add.go index 15f22e4350..eafc9de3dc 100644 --- a/cmd/podman/manifest/add.go +++ b/cmd/podman/manifest/add.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "os" + "strings" "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/completion" @@ -12,30 +14,35 @@ import ( "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/util" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/spf13/cobra" ) -// manifestAddOptsWrapper wraps entities.ManifestAddOptions and prevents leaking -// CLI-only fields into the API types. +// manifestAddOptsWrapper wraps entities.ManifestAddOptions and prevents +// leaking CLI-only fields into the API types. 
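The wrapper comment above describes a pattern these manifest commands share: embed the entities options type in a CLI-only struct so flag state never leaks into the API types. A generic sketch of that shape; every name in it is invented:

```go
package main

import "fmt"

// APIOptions stands in for an entities.*Options type: part of the public
// API surface, so it must stay free of CLI-only state.
type APIOptions struct {
	Authfile string
}

// cliOptsWrapper embeds the API type and keeps flag-only state in
// unexported fields, mirroring the wrapper defined below.
type cliOptsWrapper struct {
	APIOptions

	tlsVerifyCLI   bool   // CLI only
	credentialsCLI string // CLI only
}

func main() {
	opts := cliOptsWrapper{tlsVerifyCLI: true, credentialsCLI: "user:pass"}
	opts.Authfile = "/tmp/auth.json" // embedded API field is still addressable
	// Only the embedded API portion would be handed to the image engine.
	fmt.Printf("%+v\n", opts.APIOptions)
}
```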
type manifestAddOptsWrapper struct { entities.ManifestAddOptions - - TLSVerifyCLI bool // CLI only - Insecure bool // CLI only - CredentialsCLI string + artifactOptions entities.ManifestAddArtifactOptions + + tlsVerifyCLI bool // CLI only + insecure bool // CLI only + credentialsCLI string // CLI only + artifact bool // CLI only + artifactConfigFile string // CLI only + artifactType string // CLI only } var ( manifestAddOpts = manifestAddOptsWrapper{} addCmd = &cobra.Command{ - Use: "add [options] LIST IMAGE [IMAGE...]", - Short: "Add images to a manifest list or image index", - Long: "Adds an image to a manifest list or image index.", + Use: "add [options] LIST IMAGEORARTIFACT [IMAGEORARTIFACT...]", + Short: "Add images or artifacts to a manifest list or image index", + Long: "Adds an image or artifact to a manifest list or image index.", RunE: add, Args: cobra.MinimumNArgs(2), ValidArgsFunction: common.AutocompleteImages, Example: `podman manifest add mylist:v1.11 image:v1.11-amd64 - podman manifest add mylist:v1.11 transport:imageName`, + podman manifest add mylist:v1.11 transport:imageName`, } ) @@ -55,6 +62,32 @@ func init() { flags.StringVar(&manifestAddOpts.Arch, archFlagName, "", "override the `architecture` of the specified image") _ = addCmd.RegisterFlagCompletionFunc(archFlagName, completion.AutocompleteArch) + artifactFlagName := "artifact" + flags.BoolVar(&manifestAddOpts.artifact, artifactFlagName, false, "add all arguments as artifact files rather than as images") + + artifactExcludeTitlesFlagName := "artifact-exclude-titles" + flags.BoolVar(&manifestAddOpts.artifactOptions.ExcludeTitles, artifactExcludeTitlesFlagName, false, fmt.Sprintf(`refrain from setting %q annotations on "layers"`, imgspecv1.AnnotationTitle)) + + artifactTypeFlagName := "artifact-type" + flags.StringVar(&manifestAddOpts.artifactType, artifactTypeFlagName, "", "override the artifactType value") + _ = addCmd.RegisterFlagCompletionFunc(artifactTypeFlagName, completion.AutocompleteNone) + + artifactConfigFlagName := "artifact-config" + flags.StringVar(&manifestAddOpts.artifactConfigFile, artifactConfigFlagName, "", "artifact configuration file") + _ = addCmd.RegisterFlagCompletionFunc(artifactConfigFlagName, completion.AutocompleteNone) + + artifactConfigTypeFlagName := "artifact-config-type" + flags.StringVar(&manifestAddOpts.artifactOptions.ConfigType, artifactConfigTypeFlagName, "", "artifact configuration media type") + _ = addCmd.RegisterFlagCompletionFunc(artifactConfigTypeFlagName, completion.AutocompleteNone) + + artifactLayerTypeFlagName := "artifact-layer-type" + flags.StringVar(&manifestAddOpts.artifactOptions.LayerType, artifactLayerTypeFlagName, "", "artifact layer media type") + _ = addCmd.RegisterFlagCompletionFunc(artifactLayerTypeFlagName, completion.AutocompleteNone) + + artifactSubjectFlagName := "artifact-subject" + flags.StringVar(&manifestAddOpts.IndexSubject, artifactSubjectFlagName, "", "artifact subject reference") + _ = addCmd.RegisterFlagCompletionFunc(artifactSubjectFlagName, completion.AutocompleteNone) + authfileFlagName := "authfile" flags.StringVar(&manifestAddOpts.Authfile, authfileFlagName, auth.GetDefaultAuthFile(), "path of the authentication file. 
Use REGISTRY_AUTH_FILE environment variable to override") _ = addCmd.RegisterFlagCompletionFunc(authfileFlagName, completion.AutocompleteDefault) @@ -64,7 +97,7 @@ func init() { _ = addCmd.RegisterFlagCompletionFunc(certDirFlagName, completion.AutocompleteDefault) credsFlagName := "creds" - flags.StringVar(&manifestAddOpts.CredentialsCLI, credsFlagName, "", "use `[username[:password]]` for accessing the registry") + flags.StringVar(&manifestAddOpts.credentialsCLI, credsFlagName, "", "use `[username[:password]]` for accessing the registry") _ = addCmd.RegisterFlagCompletionFunc(credsFlagName, completion.AutocompleteNone) featuresFlagName := "features" @@ -79,9 +112,9 @@ func init() { flags.StringVar(&manifestAddOpts.OSVersion, osVersionFlagName, "", "override the OS `version` of the specified image") _ = addCmd.RegisterFlagCompletionFunc(osVersionFlagName, completion.AutocompleteNone) - flags.BoolVar(&manifestAddOpts.Insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry") + flags.BoolVar(&manifestAddOpts.insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry") _ = flags.MarkHidden("insecure") - flags.BoolVar(&manifestAddOpts.TLSVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") + flags.BoolVar(&manifestAddOpts.tlsVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") variantFlagName := "variant" flags.StringVar(&manifestAddOpts.Variant, variantFlagName, "", "override the `Variant` of the specified image") @@ -99,8 +132,8 @@ func add(cmd *cobra.Command, args []string) error { } } - if manifestAddOpts.CredentialsCLI != "" { - creds, err := util.ParseRegistryCreds(manifestAddOpts.CredentialsCLI) + if manifestAddOpts.credentialsCLI != "" { + creds, err := util.ParseRegistryCreds(manifestAddOpts.credentialsCLI) if err != nil { return err } @@ -113,18 +146,53 @@ func add(cmd *cobra.Command, args []string) error { // which is important to implement a sane way of dealing with defaults of // boolean CLI flags. 
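The comment above concerns mapping the plain --tls-verify boolean onto c/image's tri-state OptionalBool only when the flag was explicitly set. A compact sketch of that mapping, with a (changed, value) pair standing in for cobra's Flags().Changed plus the bound variable:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// skipTLSVerify mirrors the logic that follows: only when --tls-verify was
// explicitly set does the value leave "undefined", and it is stored
// inverted because the API field means *skip* verification.
func skipTLSVerify(changed, tlsVerify bool) types.OptionalBool {
	if !changed {
		return types.OptionalBoolUndefined
	}
	return types.NewOptionalBool(!tlsVerify)
}

func main() {
	fmt.Println(skipTLSVerify(false, true) == types.OptionalBoolUndefined) // true: flag untouched
	fmt.Println(skipTLSVerify(true, true) == types.OptionalBoolFalse)      // true: verify, do not skip
	fmt.Println(skipTLSVerify(true, false) == types.OptionalBoolTrue)      // true: skip verification
}
```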
if cmd.Flags().Changed("tls-verify") { - manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(!manifestAddOpts.TLSVerifyCLI) + manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(!manifestAddOpts.tlsVerifyCLI) } if cmd.Flags().Changed("insecure") { if manifestAddOpts.SkipTLSVerify != types.OptionalBoolUndefined { return errors.New("--insecure may not be used with --tls-verify") } - manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(manifestAddOpts.Insecure) + manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(manifestAddOpts.insecure) + } + + if !manifestAddOpts.artifact { + var changedArtifactFlags []string + for _, artifactOption := range []string{"artifact-type", "artifact-config", "artifact-config-type", "artifact-layer-type", "artifact-subject", "artifact-exclude-titles"} { + if cmd.Flags().Changed(artifactOption) { + changedArtifactFlags = append(changedArtifactFlags, "--"+artifactOption) + } + } + switch { + case len(changedArtifactFlags) == 1: + return fmt.Errorf("%s requires --artifact", changedArtifactFlags[0]) + case len(changedArtifactFlags) > 1: + return fmt.Errorf("%s require --artifact", strings.Join(changedArtifactFlags, "/")) + } } - listID, err := registry.ImageEngine().ManifestAdd(context.Background(), args[0], args[1:], manifestAddOpts.ManifestAddOptions) - if err != nil { - return err + var listID string + var err error + if manifestAddOpts.artifact { + if cmd.Flags().Changed("artifact-type") { + manifestAddOpts.artifactOptions.Type = &manifestAddOpts.artifactType + } + if manifestAddOpts.artifactConfigFile != "" { + configBytes, err := os.ReadFile(manifestAddOpts.artifactConfigFile) + if err != nil { + return fmt.Errorf("%v", err) + } + manifestAddOpts.artifactOptions.Config = string(configBytes) + } + manifestAddOpts.artifactOptions.ManifestAnnotateOptions = manifestAddOpts.ManifestAnnotateOptions + listID, err = registry.ImageEngine().ManifestAddArtifact(context.Background(), args[0], args[1:], manifestAddOpts.artifactOptions) + if err != nil { + return err + } + } else { + listID, err = registry.ImageEngine().ManifestAdd(context.Background(), args[0], args[1:], manifestAddOpts.ManifestAddOptions) + if err != nil { + return err + } } fmt.Println(listID) return nil diff --git a/cmd/podman/manifest/annotate.go b/cmd/podman/manifest/annotate.go index e5dae5649b..60765a9b22 100644 --- a/cmd/podman/manifest/annotate.go +++ b/cmd/podman/manifest/annotate.go @@ -1,7 +1,9 @@ package manifest import ( + "errors" "fmt" + "strings" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v5/cmd/podman/common" @@ -10,14 +12,22 @@ import ( "github.com/spf13/cobra" ) +// manifestAnnotateOptsWrapper wraps entities.ManifestAnnotateOptions and +// prevents us from having to add CLI-only fields to the API types. 
+type manifestAnnotateOptsWrapper struct { + entities.ManifestAnnotateOptions + annotations []string + index bool +} + var ( - manifestAnnotateOpts = entities.ManifestAnnotateOptions{} + manifestAnnotateOpts = manifestAnnotateOptsWrapper{} annotateCmd = &cobra.Command{ - Use: "annotate [options] LIST IMAGE", + Use: "annotate [options] LIST IMAGEORARTIFACT", Short: "Add or update information about an entry in a manifest list or image index", Long: "Adds or updates information about an entry in a manifest list or image index.", RunE: annotate, - Args: cobra.ExactArgs(2), + Args: cobra.RangeArgs(1, 2), Example: `podman manifest annotate --annotation left=right mylist:v1.11 sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736`, ValidArgsFunction: common.AutocompleteImages, } @@ -31,36 +41,85 @@ func init() { flags := annotateCmd.Flags() annotationFlagName := "annotation" - flags.StringArrayVar(&manifestAnnotateOpts.Annotation, annotationFlagName, nil, "set an `annotation` for the specified image") + flags.StringArrayVar(&manifestAnnotateOpts.annotations, annotationFlagName, nil, "set an `annotation` for the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(annotationFlagName, completion.AutocompleteNone) archFlagName := "arch" - flags.StringVar(&manifestAnnotateOpts.Arch, archFlagName, "", "override the `architecture` of the specified image") + flags.StringVar(&manifestAnnotateOpts.Arch, archFlagName, "", "override the `architecture` of the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(archFlagName, completion.AutocompleteArch) featuresFlagName := "features" - flags.StringSliceVar(&manifestAnnotateOpts.Features, featuresFlagName, nil, "override the `features` of the specified image") + flags.StringSliceVar(&manifestAnnotateOpts.Features, featuresFlagName, nil, "override the `features` of the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(featuresFlagName, completion.AutocompleteNone) + indexFlagName := "index" + flags.BoolVar(&manifestAnnotateOpts.index, indexFlagName, false, "apply --"+annotationFlagName+" values to the image index itself") + osFlagName := "os" - flags.StringVar(&manifestAnnotateOpts.OS, osFlagName, "", "override the `OS` of the specified image") + flags.StringVar(&manifestAnnotateOpts.OS, osFlagName, "", "override the `OS` of the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(osFlagName, completion.AutocompleteOS) osFeaturesFlagName := "os-features" - flags.StringSliceVar(&manifestAnnotateOpts.OSFeatures, osFeaturesFlagName, nil, "override the OS `features` of the specified image") + flags.StringSliceVar(&manifestAnnotateOpts.OSFeatures, osFeaturesFlagName, nil, "override the OS `features` of the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(osFeaturesFlagName, completion.AutocompleteNone) osVersionFlagName := "os-version" - flags.StringVar(&manifestAnnotateOpts.OSVersion, osVersionFlagName, "", "override the OS `version` of the specified image") + flags.StringVar(&manifestAnnotateOpts.OSVersion, osVersionFlagName, "", "override the OS `version` of the specified image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(osVersionFlagName, completion.AutocompleteNone) variantFlagName := "variant" - flags.StringVar(&manifestAnnotateOpts.Variant, variantFlagName, "", "override the `Variant` of the specified image") + flags.StringVar(&manifestAnnotateOpts.Variant, variantFlagName, "", "override the `Variant` of the specified 
image or artifact") _ = annotateCmd.RegisterFlagCompletionFunc(variantFlagName, completion.AutocompleteNone) + + subjectFlagName := "subject" + flags.StringVar(&manifestAnnotateOpts.IndexSubject, subjectFlagName, "", "set the `subject` to which the image index refers") + _ = annotateCmd.RegisterFlagCompletionFunc(subjectFlagName, completion.AutocompleteNone) } func annotate(cmd *cobra.Command, args []string) error { - id, err := registry.ImageEngine().ManifestAnnotate(registry.Context(), args[0], args[1], manifestAnnotateOpts) + var listImageSpec, instanceSpec string + switch len(args) { + case 1: + listImageSpec = args[0] + if listImageSpec == "" { + return fmt.Errorf(`invalid image name "%s"`, args[0]) + } + if !manifestAnnotateOpts.index { + return errors.New(`expected an instance digest, image name, or artifact name`) + } + case 2: + listImageSpec = args[0] + if listImageSpec == "" { + return fmt.Errorf(`invalid image name "%s"`, args[0]) + } + if manifestAnnotateOpts.index { + return fmt.Errorf(`did not expect image or artifact name "%s" when modifying the entire index`, args[1]) + } + instanceSpec = args[1] + if instanceSpec == "" { + return fmt.Errorf(`invalid instance digest, image name, or artifact name "%s"`, instanceSpec) + } + default: + return errors.New("expected either a list name and --index or a list name and an image digest or image name or artifact name") + } + opts := manifestAnnotateOpts.ManifestAnnotateOptions + var annotations map[string]string + for _, annotation := range manifestAnnotateOpts.annotations { + k, v, parsed := strings.Cut(annotation, "=") + if !parsed { + return fmt.Errorf("expected --annotation %q to be in key=value format", annotation) + } + if annotations == nil { + annotations = make(map[string]string) + } + annotations[k] = v + } + if manifestAnnotateOpts.index { + opts.IndexAnnotations = annotations + } else { + opts.Annotations = annotations + } + id, err := registry.ImageEngine().ManifestAnnotate(registry.Context(), args[0], args[1], opts) if err != nil { return err } diff --git a/cmd/podman/manifest/create.go b/cmd/podman/manifest/create.go index eb8d6ad517..4e953ab5a2 100644 --- a/cmd/podman/manifest/create.go +++ b/cmd/podman/manifest/create.go @@ -3,7 +3,9 @@ package manifest import ( "errors" "fmt" + "strings" + "github.com/containers/common/pkg/completion" "github.com/containers/image/v5/types" "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/registry" @@ -15,8 +17,8 @@ import ( // CLI-only fields into the API types. 
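Both manifest annotate (above) and manifest create (below) now fold repeated --annotation key=value flags into a map using strings.Cut. A standalone sketch of that parsing, including the malformed-value error path; the sample annotations are arbitrary:

```go
package main

import (
	"fmt"
	"strings"
)

// parseAnnotations turns repeated "key=value" strings into a map, rejecting
// entries without an '=' the same way the annotate and create commands do.
func parseAnnotations(values []string) (map[string]string, error) {
	var annotations map[string]string
	for _, annotation := range values {
		k, v, ok := strings.Cut(annotation, "=")
		if !ok {
			return nil, fmt.Errorf("expected --annotation %q to be in key=value format", annotation)
		}
		if annotations == nil {
			annotations = make(map[string]string)
		}
		annotations[k] = v
	}
	return annotations, nil
}

func main() {
	m, err := parseAnnotations([]string{"org.opencontainers.image.title=demo", "stage=ci"})
	fmt.Println(m, err) // map[org.opencontainers.image.title:demo stage:ci] <nil>

	_, err = parseAnnotations([]string{"missing-equals"})
	fmt.Println(err) // expected --annotation "missing-equals" to be in key=value format
}
```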
type manifestCreateOptsWrapper struct { entities.ManifestCreateOptions - - TLSVerifyCLI, Insecure bool // CLI only + annotations []string // CLI only + tlsVerifyCLI, insecure bool // CLI only } var ( @@ -43,9 +45,11 @@ func init() { flags := createCmd.Flags() flags.BoolVar(&manifestCreateOpts.All, "all", false, "add all of the lists' images if the images to add are lists") flags.BoolVarP(&manifestCreateOpts.Amend, "amend", "a", false, "modify an existing list if one with the desired name already exists") - flags.BoolVar(&manifestCreateOpts.Insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry") + flags.BoolVar(&manifestCreateOpts.insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry") + flags.StringArrayVar(&manifestCreateOpts.annotations, "annotation", nil, "set annotations on the new list") + _ = createCmd.RegisterFlagCompletionFunc("annotation", completion.AutocompleteNone) _ = flags.MarkHidden("insecure") - flags.BoolVar(&manifestCreateOpts.TLSVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") + flags.BoolVar(&manifestCreateOpts.tlsVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") } func create(cmd *cobra.Command, args []string) error { @@ -54,13 +58,23 @@ func create(cmd *cobra.Command, args []string) error { // which is important to implement a sane way of dealing with defaults of // boolean CLI flags. if cmd.Flags().Changed("tls-verify") { - manifestCreateOpts.SkipTLSVerify = types.NewOptionalBool(!manifestCreateOpts.TLSVerifyCLI) + manifestCreateOpts.SkipTLSVerify = types.NewOptionalBool(!manifestCreateOpts.tlsVerifyCLI) } if cmd.Flags().Changed("insecure") { if manifestCreateOpts.SkipTLSVerify != types.OptionalBoolUndefined { return errors.New("--insecure may not be used with --tls-verify") } - manifestCreateOpts.SkipTLSVerify = types.NewOptionalBool(manifestCreateOpts.Insecure) + manifestCreateOpts.SkipTLSVerify = types.NewOptionalBool(manifestCreateOpts.insecure) + } + for _, annotation := range manifestCreateOpts.annotations { + k, v, parsed := strings.Cut(annotation, "=") + if !parsed { + return fmt.Errorf("expected --annotation %q to be in key=value format", annotation) + } + if manifestCreateOpts.Annotations == nil { + manifestCreateOpts.Annotations = make(map[string]string) + } + manifestCreateOpts.Annotations[k] = v } imageID, err := registry.ImageEngine().ManifestCreate(registry.Context(), args[0], args[1:], manifestCreateOpts.ManifestCreateOptions) diff --git a/cmd/podman/networks/inspect.go b/cmd/podman/networks/inspect.go index 6bb10287b5..f2962fdeca 100644 --- a/cmd/podman/networks/inspect.go +++ b/cmd/podman/networks/inspect.go @@ -1,7 +1,6 @@ package network import ( - "github.com/containers/common/libnetwork/types" "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/inspect" "github.com/containers/podman/v5/cmd/podman/registry" @@ -33,7 +32,7 @@ func init() { formatFlagName := "format" flags.StringVarP(&inspectOpts.Format, formatFlagName, "f", "", "Pretty-print network to JSON or using a Go template") - _ = networkinspectCommand.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&types.Network{})) + _ = networkinspectCommand.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&entities.NetworkInspectReport{})) } func networkInspect(_ *cobra.Command, args []string) error { diff --git 
a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go index 8976cb41b9..8a47020e64 100644 --- a/cmd/podman/networks/list.go +++ b/cmd/podman/networks/list.go @@ -22,6 +22,7 @@ var ( networklistDescription = `List networks` networklistCommand = &cobra.Command{ Use: "ls [options]", + Aliases: []string{"list"}, Args: validate.NoArgs, Short: "List networks", Long: networklistDescription, diff --git a/cmd/podman/pods/rm.go b/cmd/podman/pods/rm.go index b96ef15d0a..ac5bde0807 100644 --- a/cmd/podman/pods/rm.go +++ b/cmd/podman/pods/rm.go @@ -83,6 +83,10 @@ func rm(cmd *cobra.Command, args []string) error { rmOptions.Timeout = &timeout } + if rmOptions.Force { + rmOptions.Ignore = true + } + errs = append(errs, removePods(args, rmOptions.PodRmOptions, true)...) for _, idFile := range rmOptions.PodIDFiles { @@ -110,9 +114,6 @@ func removePods(namesOrIDs []string, rmOptions entities.PodRmOptions, printIDs b responses, err := registry.ContainerEngine().PodRm(context.Background(), namesOrIDs, rmOptions) if err != nil { - if rmOptions.Force && strings.Contains(err.Error(), define.ErrNoSuchPod.Error()) { - return nil - } setExitCode(err) errs = append(errs, err) if !strings.Contains(err.Error(), define.ErrRemovingCtrs.Error()) { @@ -127,9 +128,6 @@ func removePods(namesOrIDs []string, rmOptions entities.PodRmOptions, printIDs b fmt.Println(r.Id) } } else { - if rmOptions.Force && strings.Contains(r.Err.Error(), define.ErrNoSuchPod.Error()) { - continue - } setExitCode(r.Err) errs = append(errs, r.Err) for ctr, err := range r.RemovedCtrs { diff --git a/cmd/podman/pods/start.go b/cmd/podman/pods/start.go index 7e96781659..79b3f72162 100644 --- a/cmd/podman/pods/start.go +++ b/cmd/podman/pods/start.go @@ -39,9 +39,7 @@ var ( } ) -var ( - startOptions = podStartOptionsWrapper{} -) +var startOptions = podStartOptionsWrapper{} func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ @@ -60,9 +58,7 @@ func init() { } func start(cmd *cobra.Command, args []string) error { - var ( - errs utils.OutputErrors - ) + var errs utils.OutputErrors ids, err := specgenutil.ReadPodIDFiles(startOptions.PodIDFiles) if err != nil { @@ -77,7 +73,7 @@ func start(cmd *cobra.Command, args []string) error { // in the cli, first we print out all the successful attempts for _, r := range responses { if len(r.Errs) == 0 { - fmt.Println(r.Id) + fmt.Println(r.RawInput) } else { errs = append(errs, r.Errs...) } diff --git a/cmd/podman/pods/stop.go b/cmd/podman/pods/stop.go index 6805f068a9..7e9c11f3cf 100644 --- a/cmd/podman/pods/stop.go +++ b/cmd/podman/pods/stop.go @@ -71,9 +71,7 @@ func init() { } func stop(cmd *cobra.Command, args []string) error { - var ( - errs utils.OutputErrors - ) + var errs utils.OutputErrors if cmd.Flag("time").Changed { stopOptions.Timeout = stopOptions.timeoutCLI } @@ -91,7 +89,7 @@ func stop(cmd *cobra.Command, args []string) error { // in the cli, first we print out all the successful attempts for _, r := range responses { if len(r.Errs) == 0 { - fmt.Println(r.Id) + fmt.Println(r.RawInput) } else { errs = append(errs, r.Errs...) 
} diff --git a/cmd/podman/registry/config.go b/cmd/podman/registry/config.go index d3d7ad8be7..6a6d3adb70 100644 --- a/cmd/podman/registry/config.go +++ b/cmd/podman/registry/config.go @@ -11,6 +11,7 @@ import ( "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/fileutils" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -145,7 +146,7 @@ func setXdgDirs() error { if _, found := os.LookupEnv("DBUS_SESSION_BUS_ADDRESS"); !found { sessionAddr := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "bus") - if _, err := os.Stat(sessionAddr); err == nil { + if err := fileutils.Exists(sessionAddr); err == nil { sessionAddr, err = filepath.EvalSymlinks(sessionAddr) if err != nil { return err @@ -166,3 +167,19 @@ func setXdgDirs() error { } return nil } + +func RetryDefault() uint { + if IsRemote() { + return 0 + } + + return PodmanConfig().ContainersConfDefaultsRO.Engine.Retry +} + +func RetryDelayDefault() string { + if IsRemote() { + return "" + } + + return PodmanConfig().ContainersConfDefaultsRO.Engine.RetryDelay +} diff --git a/cmd/podman/root.go b/cmd/podman/root.go index aaa992b676..a373a3aa18 100644 --- a/cmd/podman/root.go +++ b/cmd/podman/root.go @@ -175,6 +175,12 @@ func readRemoteCliFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) } case host.Changed: podmanConfig.URI = host.Value.String() + default: + // No cli options set, in case CONTAINER_CONNECTION was set to something + // invalid this contains the error, see setupRemoteConnection(). + // Important so that we can show a proper useful error message but still + // allow the cli overwrites (https://github.com/containers/podman/pull/22997). + return podmanConfig.ConnectionError } return nil } @@ -185,7 +191,8 @@ func readRemoteCliFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) // 2. Env variables (CONTAINER_HOST and CONTAINER_CONNECTION); // 3. ActiveService from containers.conf; // 4. RemoteURI; -func setupRemoteConnection(podmanConfig *entities.PodmanConfig) error { +// Returns the name of the default connection if any. 
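The root.go changes here defer a bad CONTAINER_CONNECTION: setupRemoteConnection (below) stashes the lookup error on the config and returns the connection name, and readRemoteCliFlags (above) only surfaces that error when no CLI flag overrides the connection. A simplified sketch of the flow, with a small stand-in for entities.PodmanConfig and made-up URIs:

```go
package main

import "fmt"

// config stands in for the handful of entities.PodmanConfig fields used here.
type config struct {
	URI             string
	ConnectionError error
}

// resolveConnection mimics setupRemoteConnection: a bad CONTAINER_CONNECTION
// is recorded rather than returned, so flag registration can still proceed.
func resolveConnection(cfg *config, connEnv string) string {
	if connEnv == "bogus" { // pretend the containers.conf lookup failed
		cfg.ConnectionError = fmt.Errorf("connection %q not found", connEnv)
		return connEnv
	}
	cfg.URI = "ssh://core@localhost:2222" // made-up URI for the found connection
	return connEnv
}

// applyCLI mimics readRemoteCliFlags: an explicit --url wins; only when the
// user supplied nothing does the stored error become fatal.
func applyCLI(cfg *config, urlFlag string) error {
	if urlFlag != "" {
		cfg.URI = urlFlag
		return nil
	}
	return cfg.ConnectionError
}

func main() {
	cfg := &config{}
	resolveConnection(cfg, "bogus")
	fmt.Println(applyCLI(cfg, "unix:///run/podman/podman.sock")) // <nil>: flag overrode the bad env var
	fmt.Println(applyCLI(cfg, ""))                               // the stored error surfaces
}
```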
+func setupRemoteConnection(podmanConfig *entities.PodmanConfig) string { conf := podmanConfig.ContainersConfDefaultsRO connEnv, hostEnv, sshkeyEnv := os.Getenv("CONTAINER_CONNECTION"), os.Getenv("CONTAINER_HOST"), os.Getenv("CONTAINER_SSHKEY") @@ -193,11 +200,13 @@ func setupRemoteConnection(podmanConfig *entities.PodmanConfig) error { case connEnv != "": con, err := conf.GetConnection(connEnv, false) if err != nil { - return err + podmanConfig.ConnectionError = err + return connEnv } podmanConfig.URI = con.URI podmanConfig.Identity = con.Identity podmanConfig.MachineMode = con.IsMachine + return con.Name case hostEnv != "": if sshkeyEnv != "" { podmanConfig.Identity = sshkeyEnv @@ -209,11 +218,11 @@ func setupRemoteConnection(podmanConfig *entities.PodmanConfig) error { podmanConfig.URI = con.URI podmanConfig.Identity = con.Identity podmanConfig.MachineMode = con.IsMachine - } else { - podmanConfig.URI = registry.DefaultAPIAddress() + return con.Name } + podmanConfig.URI = registry.DefaultAPIAddress() } - return nil + return "" } func persistentPreRunE(cmd *cobra.Command, args []string) error { @@ -362,8 +371,12 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error { // 3) command doesn't require Parent Namespace _, found := cmd.Annotations[registry.ParentNSRequired] if !registry.IsRemote() && !found { + cgroupMode := "" _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess] - err := registry.ContainerEngine().SetupRootless(registry.Context(), noMoveProcess) + if flag := cmd.LocalFlags().Lookup("cgroups"); flag != nil { + cgroupMode = flag.Value.String() + } + err := registry.ContainerEngine().SetupRootless(registry.Context(), noMoveProcess, cgroupMode) if err != nil { return err } @@ -459,9 +472,8 @@ func stdOutHook() { } func rootFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) { - if err := setupRemoteConnection(podmanConfig); err != nil { - return - } + connectionName := setupRemoteConnection(podmanConfig) + lFlags := cmd.Flags() sshFlagName := "ssh" @@ -469,7 +481,7 @@ func rootFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) { _ = cmd.RegisterFlagCompletionFunc(sshFlagName, common.AutocompleteSSH) connectionFlagName := "connection" - lFlags.StringP(connectionFlagName, "c", podmanConfig.ContainersConfDefaultsRO.Engine.ActiveService, "Connection to use for remote Podman service") + lFlags.StringP(connectionFlagName, "c", connectionName, "Connection to use for remote Podman service (CONTAINER_CONNECTION)") _ = cmd.RegisterFlagCompletionFunc(connectionFlagName, common.AutocompleteSystemConnections) urlFlagName := "url" @@ -576,6 +588,8 @@ func rootFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) { pFlags.BoolVar(&podmanConfig.TransientStore, "transient-store", false, "Enable transient container storage") + pFlags.StringArrayVar(&podmanConfig.PullOptions, "pull-option", nil, "Specify an option to change how the image is pulled") + runtimeFlagName := "runtime" pFlags.StringVar(&podmanConfig.RuntimePath, runtimeFlagName, podmanConfig.ContainersConfDefaultsRO.Engine.OCIRuntime, "Path to the OCI-compatible binary used to run containers.") _ = cmd.RegisterFlagCompletionFunc(runtimeFlagName, completion.AutocompleteDefault) @@ -602,6 +616,7 @@ func rootFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) { "default-mounts-file", "max-workers", "memory-profile", + "pull-option", "registries-conf", "trace", } { diff --git a/cmd/podman/system/check.go b/cmd/podman/system/check.go new file mode 100644 index 
0000000000..3a3f0348bc --- /dev/null +++ b/cmd/podman/system/check.go @@ -0,0 +1,138 @@ +package system + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v5/cmd/podman/registry" + "github.com/containers/podman/v5/cmd/podman/validate" + "github.com/containers/podman/v5/pkg/domain/entities/types" + multierror "github.com/hashicorp/go-multierror" + "github.com/spf13/cobra" +) + +var ( + checkOptions = types.SystemCheckOptions{} + checkDescription = ` + podman system check + + Check storage for consistency and remove anything that looks damaged +` + + checkCommand = &cobra.Command{ + Use: "check [options]", + Short: "Check storage consistency", + Args: validate.NoArgs, + Long: checkDescription, + RunE: check, + ValidArgsFunction: completion.AutocompleteNone, + Example: `podman system check`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Command: checkCommand, + Parent: systemCmd, + }) + flags := checkCommand.Flags() + flags.BoolVarP(&checkOptions.Quick, "quick", "q", false, "Skip time-consuming checks. The default is to include time-consuming checks") + flags.BoolVarP(&checkOptions.Repair, "repair", "r", false, "Remove inconsistent images") + flags.BoolVarP(&checkOptions.RepairLossy, "force", "f", false, "Remove inconsistent images and containers") + flags.DurationP("max", "m", 24*time.Hour, "Maximum allowed age of unreferenced layers") + _ = checkCommand.RegisterFlagCompletionFunc("max", completion.AutocompleteNone) +} + +func check(cmd *cobra.Command, args []string) error { + flags := cmd.Flags() + if flags.Changed("max") { + maxAge, err := flags.GetDuration("max") + if err != nil { + return err + } + checkOptions.UnreferencedLayerMaximumAge = &maxAge + } + response, err := registry.ContainerEngine().SystemCheck(context.Background(), checkOptions) + if err != nil { + return err + } + + if err = printSystemCheckResults(response); err != nil { + return err + } + + if !checkOptions.Repair && !checkOptions.RepairLossy && response.Errors { + return errors.New("damage detected in local storage") + } + + recheckOptions := checkOptions + recheckOptions.Repair = false + recheckOptions.RepairLossy = false + if response, err = registry.ContainerEngine().SystemCheck(context.Background(), recheckOptions); err != nil { + return err + } + if response.Errors { + return errors.New("damage in local storage still present after repair attempt") + } + + return nil +} + +func printSystemCheckResults(report *types.SystemCheckReport) error { + if !report.Errors { + return nil + } + errorSlice := func(strs []string) []error { + if strs == nil { + return nil + } + errs := make([]error, len(strs)) + for i, s := range strs { + errs[i] = errors.New(s) + } + return errs + } + for damagedLayer, errorsSlice := range report.Layers { + merr := multierror.Append(nil, errorSlice(errorsSlice)...) + if err := merr.ErrorOrNil(); err != nil { + fmt.Printf("Damaged layer %s:\n%s", damagedLayer, err) + } + } + for _, removedLayer := range report.RemovedLayers { + fmt.Printf("Deleted damaged layer: %s\n", removedLayer) + } + for damagedROLayer, errorsSlice := range report.ROLayers { + merr := multierror.Append(nil, errorSlice(errorsSlice)...) + if err := merr.ErrorOrNil(); err != nil { + fmt.Printf("Damaged read-only layer %s:\n%s", damagedROLayer, err) + } + } + for damagedImage, errorsSlice := range report.Images { + merr := multierror.Append(nil, errorSlice(errorsSlice)...) 
+ if err := merr.ErrorOrNil(); err != nil { + fmt.Printf("Damaged image %s:\n%s", damagedImage, err) + } + } + for removedImage := range report.RemovedImages { + fmt.Printf("Deleted damaged image: %s\n", removedImage) + } + for damagedROImage, errorsSlice := range report.ROImages { + merr := multierror.Append(nil, errorSlice(errorsSlice)...) + if err := merr.ErrorOrNil(); err != nil { + fmt.Printf("Damaged read-only image %s\n%s", damagedROImage, err) + } + } + for damagedContainer, errorsSlice := range report.Containers { + merr := multierror.Append(nil, errorSlice(errorsSlice)...) + if err := merr.ErrorOrNil(); err != nil { + fmt.Printf("Damaged container %s:\n%s", damagedContainer, err) + } + } + for removedContainer := range report.RemovedContainers { + fmt.Printf("Deleted damaged container: %s\n", removedContainer) + } + return nil +} diff --git a/cmd/podman/system/connection/list.go b/cmd/podman/system/connection/list.go index 75dd8846df..24e6b29d29 100644 --- a/cmd/podman/system/connection/list.go +++ b/cmd/podman/system/connection/list.go @@ -3,6 +3,7 @@ package connection import ( "fmt" "os" + "slices" "sort" "github.com/containers/common/pkg/completion" @@ -13,7 +14,6 @@ import ( "github.com/containers/podman/v5/cmd/podman/system" "github.com/containers/podman/v5/cmd/podman/validate" "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) var ( diff --git a/cmd/podman/system/connection/remove.go b/cmd/podman/system/connection/remove.go index 75975b9a0b..225b1f038f 100644 --- a/cmd/podman/system/connection/remove.go +++ b/cmd/podman/system/connection/remove.go @@ -2,13 +2,13 @@ package connection import ( "errors" + "slices" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/cmd/podman/common" "github.com/containers/podman/v5/cmd/podman/registry" "github.com/containers/podman/v5/cmd/podman/system" "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) var ( diff --git a/cmd/podman/system/events.go b/cmd/podman/system/events.go index fbda9dc300..44b24319c0 100644 --- a/cmd/podman/system/events.go +++ b/cmd/podman/system/events.go @@ -49,6 +49,56 @@ var ( noTrunc bool ) +type Event struct { + // containerExitCode is for storing the exit code of a container which can + // be used for "internal" event notification + ContainerExitCode *int `json:",omitempty"` + // ID can be for the container, image, volume, etc + ID string `json:",omitempty"` + // Image used where applicable + Image string `json:",omitempty"` + // Name where applicable + Name string `json:",omitempty"` + // Network is the network name in a network event + Network string `json:"network,omitempty"` + // Status describes the event that occurred + Status events.Status + // Time the event occurred + Time int64 `json:"time,omitempty"` + // timeNano the event occurred in nanoseconds + TimeNano int64 `json:"timeNano,omitempty"` + // Type of event that occurred + Type events.Type + // Health status of the current container + HealthStatus string `json:"health_status,omitempty"` + // Error code for certain events involving errors. 
+ Error string `json:",omitempty"` + + events.Details +} + +func newEventFromLibpodEvent(e *events.Event) Event { + return Event{ + ContainerExitCode: e.ContainerExitCode, + ID: e.ID, + Image: e.Image, + Name: e.Name, + Network: e.Network, + Status: e.Status, + Time: e.Time.Unix(), + Type: e.Type, + HealthStatus: e.HealthStatus, + Details: e.Details, + TimeNano: e.Time.UnixNano(), + Error: e.Error, + } +} + +func (e *Event) ToJSONString() (string, error) { + b, err := json.Marshal(e) + return string(b), err +} + func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ Command: systemEventsCommand, @@ -70,7 +120,7 @@ func eventsFlags(cmd *cobra.Command) { formatFlagName := "format" flags.StringVar(&eventFormat, formatFlagName, "", "format the output using a Go template") - _ = cmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&events.Event{})) + _ = cmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&Event{})) flags.BoolVar(&eventOptions.Stream, "stream", true, "stream events and do not exit when returning the last known event") @@ -112,8 +162,7 @@ func eventsCmd(cmd *cobra.Command, _ []string) error { } go func() { - err := registry.ContainerEngine().Events(context.Background(), eventOptions) - errChannel <- err + errChannel <- registry.ContainerEngine().Events(context.Background(), eventOptions) }() for { @@ -121,17 +170,25 @@ func eventsCmd(cmd *cobra.Command, _ []string) error { case event, ok := <-eventChannel: if !ok { // channel was closed we can exit + select { + case err := <-errChannel: + if err != nil { + return err + } + default: + } return nil } switch { case doJSON: - jsonStr, err := event.ToJSONString() + e := newEventFromLibpodEvent(event) + jsonStr, err := e.ToJSONString() if err != nil { return err } fmt.Println(jsonStr) case cmd.Flags().Changed("format"): - if err := rpt.Execute(event); err != nil { + if err := rpt.Execute(newEventFromLibpodEvent(event)); err != nil { return err } default: diff --git a/cmd/podman/system/reset_machine.go b/cmd/podman/system/reset_machine.go index ad249a1a91..35430dfb7d 100644 --- a/cmd/podman/system/reset_machine.go +++ b/cmd/podman/system/reset_machine.go @@ -3,9 +3,9 @@ package system import ( - "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/connection" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" p "github.com/containers/podman/v5/pkg/machine/provider" "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" @@ -18,7 +18,7 @@ func resetMachine() error { if err != nil { return err } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return err } diff --git a/cmd/quadlet/main.go b/cmd/quadlet/main.go index 621f22c197..c443b05f26 100644 --- a/cmd/quadlet/main.go +++ b/cmd/quadlet/main.go @@ -49,12 +49,13 @@ var ( // Key: Extension // Value: Processing order for resource naming dependencies supportedExtensions = map[string]int{ - ".container": 3, + ".container": 4, ".volume": 2, - ".kube": 3, + ".kube": 4, ".network": 2, ".image": 1, - ".pod": 4, + ".build": 3, + ".pod": 5, } ) @@ -474,7 +475,7 @@ func warnIfAmbiguousName(unit *parser.UnitFile, group string) { if !ok { return } - if strings.HasSuffix(imageName, ".image") { + if strings.HasSuffix(imageName, ".build") || strings.HasSuffix(imageName, ".image") { return } if 
!isUnambiguousName(imageName) { @@ -499,6 +500,19 @@ func generatePodsInfoMap(units []*parser.UnitFile) map[string]*quadlet.PodInfo { return podsInfoMap } +func prefillBuiltImageNames(units []*parser.UnitFile, resourceNames map[string]string) { + for _, unit := range units { + if !strings.HasSuffix(unit.Filename, ".build") { + continue + } + + imageName := quadlet.GetBuiltImageName(unit) + if len(imageName) > 0 { + resourceNames[unit.Filename] = imageName + } + } +} + func main() { if err := process(); err != nil { Logf("%s", err.Error()) @@ -600,6 +614,12 @@ func process() error { // A map of network/volume unit file-names, against their calculated names, as needed by Podman. var resourceNames = make(map[string]string) + // Prefill resourceNames for .build files. This is significantly less complex than + // pre-computing all resourceNames for all Quadlet types (which is rather complex for a few + // types), but still breaks the dependency cycle between .volume and .build ([Volume] can + // have Image=some.build, and [Build] can have Volume=some.volume:/some-volume) + prefillBuiltImageNames(units, resourceNames) + for _, unit := range units { var service *parser.UnitFile var name string @@ -619,6 +639,8 @@ func process() error { case strings.HasSuffix(unit.Filename, ".image"): warnIfAmbiguousName(unit, quadlet.ImageGroup) service, name, err = quadlet.ConvertImage(unit) + case strings.HasSuffix(unit.Filename, ".build"): + service, name, err = quadlet.ConvertBuild(unit, resourceNames) case strings.HasSuffix(unit.Filename, ".pod"): service, err = quadlet.ConvertPod(unit, unit.Filename, podsInfoMap, resourceNames) default: diff --git a/cmd/rootlessport/main.go b/cmd/rootlessport/main.go index 9b24f04804..1f5692d10b 100644 --- a/cmd/rootlessport/main.go +++ b/cmd/rootlessport/main.go @@ -17,9 +17,9 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/rootlessport" - rkport "github.com/rootless-containers/rootlesskit/pkg/port" - rkbuiltin "github.com/rootless-containers/rootlesskit/pkg/port/builtin" - rkportutil "github.com/rootless-containers/rootlesskit/pkg/port/portutil" + rkport "github.com/rootless-containers/rootlesskit/v2/pkg/port" + rkbuiltin "github.com/rootless-containers/rootlesskit/v2/pkg/port/builtin" + rkportutil "github.com/rootless-containers/rootlesskit/v2/pkg/port/portutil" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -343,7 +343,7 @@ func child() error { errCh := make(chan error) go func() { d := rkbuiltin.NewChildDriver(os.Stderr) - dErr := d.RunChildDriver(opaque, quit) + dErr := d.RunChildDriver(opaque, quit, "") errCh <- dErr }() defer func() { diff --git a/cmd/rootlessport/wsl.go b/cmd/rootlessport/wsl.go index 01b0981896..b43c5f8c21 100644 --- a/cmd/rootlessport/wsl.go +++ b/cmd/rootlessport/wsl.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/containers/common/pkg/machine" - rkport "github.com/rootless-containers/rootlesskit/pkg/port" + rkport "github.com/rootless-containers/rootlesskit/v2/pkg/port" ) // WSL machines do not relay ipv4 traffic to dual-stack ports, simulate instead diff --git a/cmd/rootlessport/wsl_test.go b/cmd/rootlessport/wsl_test.go index 2c95251cdf..ac23e22c50 100644 --- a/cmd/rootlessport/wsl_test.go +++ b/cmd/rootlessport/wsl_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/containers/common/pkg/machine" - "github.com/rootless-containers/rootlesskit/pkg/port" + "github.com/rootless-containers/rootlesskit/v2/pkg/port" 
"github.com/stretchr/testify/assert" ) diff --git a/cmd/winpath/main.go b/cmd/winpath/main.go index ca10a7dff0..f868999fdd 100644 --- a/cmd/winpath/main.go +++ b/cmd/winpath/main.go @@ -18,13 +18,19 @@ import ( type operation int const ( - HWND_BROADCAST = 0xFFFF - WM_SETTINGCHANGE = 0x001A - SMTO_ABORTIFHUNG = 0x0002 - ERR_BAD_ARGS = 0x000A - OPERATION_FAILED = 0x06AC - Environment = "Environment" - Add operation = iota + //nolint:stylecheck + HWND_BROADCAST = 0xFFFF + //nolint:stylecheck + WM_SETTINGCHANGE = 0x001A + //nolint:stylecheck + SMTO_ABORTIFHUNG = 0x0002 + //nolint:stylecheck + ERR_BAD_ARGS = 0x000A + //nolint:stylecheck + OPERATION_FAILED = 0x06AC + + Environment = "Environment" + Add operation = iota Remove Open NotSpecified @@ -143,6 +149,8 @@ func removePathFromRegistry(path string) error { return err } + // No point preallocating we can't know how big the array needs to be. + //nolint:prealloc var elements []string for _, element := range strings.Split(existing, ";") { if strings.EqualFold(element, path) { @@ -174,6 +182,7 @@ func broadcastEnvironmentChange() { user32 := syscall.NewLazyDLL("user32") proc := user32.NewProc("SendMessageTimeoutW") millis := 3000 + //nolint:dogsled _, _, _ = proc.Call(HWND_BROADCAST, WM_SETTINGCHANGE, 0, uintptr(unsafe.Pointer(env)), SMTO_ABORTIFHUNG, uintptr(millis), 0) } diff --git a/contrib/cirrus/CIModes.md b/contrib/cirrus/CIModes.md index 409334ab73..0156de4695 100644 --- a/contrib/cirrus/CIModes.md +++ b/contrib/cirrus/CIModes.md @@ -126,6 +126,10 @@ is removed. commit-change before Cirrus-CI will notice the draft-status update (i.e. pressing the re-run button **is not** good enough). +### Intended `[CI:ALL]` behavior: + +Run even the tasks that are skipped based on changed sources conditions otherwise. 
+ ### Intended Branch tasks (and Cirrus-cron jobs): + *build* + swagger diff --git a/contrib/cirrus/cirrus_yaml_test.py b/contrib/cirrus/cirrus_yaml_test.py index 262ea4d2eb..cf62097e4f 100755 --- a/contrib/cirrus/cirrus_yaml_test.py +++ b/contrib/cirrus/cirrus_yaml_test.py @@ -26,8 +26,10 @@ def setUp(self): class TestDependsOn(TestCaseBase): ALL_TASK_NAMES = None - SUCCESS_DEPS_EXCLUDE = set(['success', 'bench_stuff', 'artifacts', - 'release', 'release_test']) + # All tasks must be listed as a dependency of one/more of these tasks + SUCCESS_DEPS_EXCLUDE = set(['build_success', 'success']) + # Tasks which do not influence any success aggregator (above) + NONSUCCESS_TASKS = set(['artifacts', 'release', 'release_test']) def setUp(self): super().setUp() @@ -36,25 +38,43 @@ def setUp(self): if key.endswith('_task')]) def test_dicts(self): - """Expected dictionaries are present and non-empty""" - self.assertIn('success_task', self.CIRRUS_YAML) - self.assertIn('success_task'.replace('_task', ''), self.ALL_TASK_NAMES) - self.assertIn('depends_on', self.CIRRUS_YAML['success_task']) - self.assertGreater(len(self.CIRRUS_YAML['success_task']['depends_on']), 0) + """Specific tasks exist and always have non-empty depends_on""" + for task_name in self.SUCCESS_DEPS_EXCLUDE | self.NONSUCCESS_TASKS: + with self.subTest(task_name=task_name): + msg = ('Expecting to find a "{0}" task'.format(task_name)) + self.assertIn(task_name, self.ALL_TASK_NAMES, msg=msg) + task = self.CIRRUS_YAML[task_name + '_task'] + self.assertGreater(len(task['depends_on']), 0) def test_task(self): """There is no task named 'task'""" self.assertNotIn('task', self.ALL_TASK_NAMES) def test_depends(self): - """Success task depends on all other tasks""" - success_deps = set(self.CIRRUS_YAML['success_task']['depends_on']) - for task_name in self.ALL_TASK_NAMES - self.SUCCESS_DEPS_EXCLUDE: + """Success aggregator tasks contain dependencies for all other tasks""" + success_deps = set() + for task_name in self.SUCCESS_DEPS_EXCLUDE: + success_deps |= set(self.CIRRUS_YAML[task_name + '_task']['depends_on']) + for task_name in self.ALL_TASK_NAMES - self.SUCCESS_DEPS_EXCLUDE - self.NONSUCCESS_TASKS: with self.subTest(task_name=task_name): - msg=('Please add "{0}" to the "depends_on" list in "success_task"' - "".format(task_name)) + msg=('No success aggregation task depends_on "{0}"'.format(task_name)) self.assertIn(task_name, success_deps, msg=msg) + def test_skips(self): + """2024-06 PR#23030: ugly but necessary duplication in skip conditions. 
Prevent typos or unwanted changes.""" + beginning = "$CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:ALL.*' && !changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'hack/**', 'version/rawversion/*') && " + real_source_changes = " && !(changesInclude('**/*.go', '**/*.c') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))" + + for task_name in self.ALL_TASK_NAMES: + task = self.CIRRUS_YAML[task_name + '_task'] + if 'skip' in task: + skip = task['skip'] + if 'changesInclude' in skip: + msg = ('{0}: invalid skip'.format(task_name)) + self.assertEqual(skip[:len(beginning)], beginning, msg=msg+": beginning part is wrong") + if 'changesIncludeOnly' in skip: + self.assertEqual(skip[len(skip)-len(real_source_changes):], real_source_changes, msg=msg+": changesIncludeOnly() part is wrong") + def not_task(self): """Ensure no task is named 'task'""" self.assertNotIn('task', self.ALL_TASK_NAMES) diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index 0fa72e20ec..ea6d8a0de5 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -71,36 +71,27 @@ export CI="${CI:-false}" CIRRUS_CI="${CIRRUS_CI:-false}" CONTINUOUS_INTEGRATION="${CONTINUOUS_INTEGRATION:-false}" CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-podman} -# Cirrus only sets $CIRRUS_BASE_SHA properly for PRs, but $EPOCH_TEST_COMMIT -# needs to be set from this value in order for `make validate` to run properly. -# When running get_ci_vm.sh, most $CIRRUS_xyz variables are empty. Attempt -# to accommodate both branch and get_ci_vm.sh testing by discovering the base -# branch SHA value. + # shellcheck disable=SC2154 -if [[ -z "$CIRRUS_BASE_SHA" ]] && [[ -z "$CIRRUS_TAG" ]] -then # Operating on a branch, or under `get_ci_vm.sh` - showrun echo "branch or get_ci_vm (CIRRUS_BASE_SHA and CIRRUS_TAG are unset)" - CIRRUS_BASE_SHA=$(git rev-parse ${UPSTREAM_REMOTE:-origin}/$DEST_BRANCH) -elif [[ -z "$CIRRUS_BASE_SHA" ]] -then # Operating on a tag - showrun echo "operating on tag" - CIRRUS_BASE_SHA=$(git rev-parse HEAD) +if [[ -n "$CIRRUS_PR" ]] && [[ -z "$PR_BASE_SHA" ]]; then + # shellcheck disable=SC2154 + PR_BASE_SHA=$(git merge-base ${DEST_BRANCH:-main} HEAD) + export PR_BASE_SHA fi -# The starting place for linting and code validation -EPOCH_TEST_COMMIT="$CIRRUS_BASE_SHA" -# Regex defining all CI-related env. vars. necessary for all possible -# testing operations on all platforms and versions. This is necessary -# to avoid needlessly passing through global/system values across -# contexts, such as host->container or root->rootless user +# The next three values define regular expressions matching env. vars. necessary +# for all possible testing contexts (rootless, container, etc.). These values +# are consumed by the passthrough_envars() automation library function. # # List of envariables which must be EXACT matches -PASSTHROUGH_ENV_EXACT='CGROUP_MANAGER|DEST_BRANCH|DISTRO_NV|GOCACHE|GOPATH|GOSRC|NETWORK_BACKEND|OCI_RUNTIME|PODMAN_IGNORE_CGROUPSV1_WARNING|ROOTLESS_USER|SCRIPT_BASE|SKIP_USERNS|EC2_INST_TYPE|PODMAN_DB|STORAGE_FS' +PASSTHROUGH_ENV_EXACT='CGROUP_MANAGER|DEST_BRANCH|DISTRO_NV|GOCACHE|GOPATH|GOSRC|NETWORK_BACKEND|OCI_RUNTIME|PR_BASE_SHA|ROOTLESS_USER|SCRIPT_BASE|SKIP_USERNS|EC2_INST_TYPE|PODMAN_DB|STORAGE_FS|PODMAN_BATS_LEAK_CHECK' # List of envariable patterns which must match AT THE BEGINNING of the name. +# Consumed by the passthrough_envars() automation library function. 
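The PASSTHROUGH_ENV_* values here are regex fragments consumed by the passthrough_envars() automation helper: exact names, name prefixes, and anywhere-substrings. A small Go illustration of checking a variable name against those three classes (patterns abbreviated; the real consumer is bash):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Abbreviated versions of the PASSTHROUGH_ENV_* fragments above.
	exact := regexp.MustCompile(`^(CGROUP_MANAGER|DEST_BRANCH|DISTRO_NV|PR_BASE_SHA)$`)
	atStart := regexp.MustCompile(`^(CI|LANG|LC_|TEST)`)
	anywhere := regexp.MustCompile(`_NAME|_FQIN`)

	for _, name := range []string{"DISTRO_NV", "CIRRUS_TASK_ID", "FEDORA_NAME", "HOME"} {
		pass := exact.MatchString(name) || atStart.MatchString(name) || anywhere.MatchString(name)
		fmt.Printf("%-16s passthrough=%v\n", name, pass)
	}
}
```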
PASSTHROUGH_ENV_ATSTART='CI|LANG|LC_|TEST' -# List of envariable patterns which can match ANYWHERE in the name +# List of envariable patterns which can match ANYWHERE in the name. +# Consumed by the passthrough_envars() automation library function. PASSTHROUGH_ENV_ANYWHERE='_NAME|_FQIN' # Unsafe env. vars for display @@ -159,6 +150,9 @@ setup_rootless() { showrun groupadd -g $rootless_gid $ROOTLESS_USER showrun useradd -g $rootless_gid -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER + # use tmpfs to speed up IO + mount -t tmpfs -o size=75%,mode=0700,uid=$rootless_uid,gid=$rootless_gid none /home/$ROOTLESS_USER + echo "$ROOTLESS_USER ALL=(root) NOPASSWD: ALL" > /etc/sudoers.d/ci-rootless mkdir -p "$HOME/.ssh" "/home/$ROOTLESS_USER/.ssh" diff --git a/contrib/cirrus/logcollector.sh b/contrib/cirrus/logcollector.sh index 074b6743e5..1d4cee7d01 100755 --- a/contrib/cirrus/logcollector.sh +++ b/contrib/cirrus/logcollector.sh @@ -36,15 +36,16 @@ case $1 in packages) # These names are common to Fedora and Debian PKG_NAMES=(\ + aardvark-dns buildah conmon containernetworking-plugins - containers-common criu crun golang + netavark + passt podman - runc skopeo slirp4netns ) @@ -53,18 +54,17 @@ case $1 in cat /etc/fedora-release PKG_LST_CMD='rpm -q --qf=%{N}-%{V}-%{R}-%{ARCH}\n' PKG_NAMES+=(\ - aardvark-dns container-selinux + containers-common libseccomp - netavark - passt ) ;; debian) cat /etc/issue PKG_LST_CMD='dpkg-query --show --showformat=${Package}-${Version}-${Architecture}\n' PKG_NAMES+=(\ - cri-o-runc + golang-github-containers-common + golang-github-containers-image libseccomp2 ) ;; diff --git a/contrib/cirrus/logformatter b/contrib/cirrus/logformatter index 70734649e2..439c82f2fe 100755 --- a/contrib/cirrus/logformatter +++ b/contrib/cirrus/logformatter @@ -322,6 +322,16 @@ END_HTML # 1 12 3 34 4 5 526 $line =~ s{^(.*)(\/(containers\/[^/]+)(\/\S+\.py).*,\s+line\s+(\d+))(,\s+in.*)$} {$1$2$6}; + + # And, sigh, Macintosh always has to be different + # 1 123 3 4 425 5 + $line =~ s{^(.*/ci/task-\d+)((/\S+):(\d+))(.*)$} + {$1$2$5}; + + # ...as does Windows + # 1 123 3 4 435 5 + $line =~ s{^(.*/Local/cirrus-ci-build/repo)((/\S+):(\d+))(.*)$} + {$1$2$5} } # Try to identify the cirrus task @@ -598,6 +608,11 @@ END_HTML $current_output = ''; } + # Windows + # 1 12 2 3 3 + elsif ($line =~ /^(\s*)(C:\\Users.*\\podman\.exe)\s(.*)$/) { + $line = qq{$1C> podman.exe $3}; + } elsif ($line =~ /^\s*Error:/ || $line =~ / level=(warning|error) /) { $line = "" . $line . ""; } @@ -655,7 +670,7 @@ END_HTML # https://onsi.github.io/ginkgo/#generating-reports-programmatically $after_divider = 999; } - elsif ($line =~ m!^\s*/\S+!) { + elsif ($line =~ m!^\s*(/\S+|C:/)!) { # Source code path: skip } elsif ($line =~ / 'doesnt' @@ -848,6 +864,9 @@ END_SYNOPSIS $test_name =~ s/\\\s+/ /g; $s .= _tr("Test name", $test_name); + # Macs always have to be different + my $is_mac = ($test_name =~ /darwin/); + # Link to further Cirrus results, e.g. other runs. # Build is mostly boring, it's usually TASK that we want to see. 
$s .= _tr("Cirrus", sprintf("Build %s / Task %s", @@ -855,10 +874,11 @@ END_SYNOPSIS _a("{CIRRUS_TASK_ID}", "https://cirrus-ci.com/task/{CIRRUS_TASK_ID}"))); # Logs: link to original (unformatted) log; journal; and, if remote, server - my @logs = ( - _a("main", "https://api.cirrus-ci.com/v1/task/{CIRRUS_TASK_ID}/logs/main.log"), - _a("journal", "https://api.cirrus-ci.com/v1/task/{CIRRUS_TASK_ID}/logs/journal.log"), - ); + my @logs; + push @logs, _a("main", sprintf("https://api.cirrus-ci.com/v1/task/{CIRRUS_TASK_ID}/logs/%s.log", + ($is_mac ? 'test' : 'main'))); + push @logs, _a("journal", "https://api.cirrus-ci.com/v1/task/{CIRRUS_TASK_ID}/logs/journal.log") + unless $is_mac; # System tests are single-threaded, and have a server log available if ($test_name =~ /sys\s+remote\s/) { @@ -866,10 +886,13 @@ END_SYNOPSIS } $s .= _tr("Logs", join(" / ", @logs)); - # BASE_SHA can tell us if our parent includes--or doesn't--a purported - # fix for a flake. Note that "commits", plural, links to a git history - # listing; if we used "commit", singular, that would be less useful. - $s .= _tr("Base commit", _a("{CIRRUS_BASE_SHA}", "https://{CIRRUS_REPO_CLONE_HOST}/{CIRRUS_REPO_FULL_NAME}/commits/{CIRRUS_BASE_SHA}")); + # PR_BASE_SHA (set in lib.sh) can tell us if our parent includes--or + # doesn't--a purported fix for a flake. Note about the URL: "commits", plural, + # links to a git history listing; if we used "commit", singular, that would + # be less useful. + if (my $base = $ENV{PR_BASE_SHA}) { + $s .= _tr("Base commit", _a($base, "https://{CIRRUS_REPO_CLONE_HOST}/{CIRRUS_REPO_FULL_NAME}/commits/$base")); + } $s .= "\n"; return $s; diff --git a/contrib/cirrus/mac_runner.sh b/contrib/cirrus/mac_runner.sh new file mode 100755 index 0000000000..b19c3a2575 --- /dev/null +++ b/contrib/cirrus/mac_runner.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# +# This script is what runs mac tests. It is invoked from .cirrus.yml +# +# Yep, some of this is adapted from runner.sh. We can't actually +# use that as a library, because Macintosh bash and awk lack +# features we need. + +set -euo pipefail + + +# Name pattern for logformatter output file, derived from environment +function output_name() { + # .cirrus.yml defines this as a short readable string for web UI + std_name_fmt=$(sed -ne 's/^.*std_name_fmt \"\(.*\)\"/\1/p' <.cirrus.yml) + test -n "$std_name_fmt" || die "Could not grep 'std_name_fmt' from .cirrus.yml" + + # Interpolate envariables. 'set -u' throws fatal if any are undefined + ( + set -u + eval echo "$std_name_fmt" | tr ' ' '-' + ) +} + +function logformatter() { + # Mac awk barfs on this, syntax error +# awk --file "${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk" \ + # shellcheck disable=SC2154 + "${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/logformatter" "$(output_name)" +} + +make localmachine 2>&1 | logformatter diff --git a/contrib/cirrus/mac_setup.sh b/contrib/cirrus/mac_setup.sh index 3ab9163baf..b51130247b 100755 --- a/contrib/cirrus/mac_setup.sh +++ b/contrib/cirrus/mac_setup.sh @@ -7,6 +7,14 @@ set -euo pipefail +# Confirm rosetta is installed/enabled and working +if ! arch -arch x86_64 /usr/bin/uname -m; then + # This likely means whatever script used to prepare this mac failed + # and/or did not execute `sudo softwareupdate --install-rosetta --agree-to-license` + echo "Rosetta doesn't appear to be installed, or is non-functional." + exit 1 +fi + # The otherwise standard `/etc/ci_environment` file cannot be used in this # context, because the system is shared for multiple tasks. Instead, persist # env. 
vars required during /subsequent/ testing steps via a "magic" Cirrus-CI @@ -31,3 +39,15 @@ echo "TMPDIR=/private/tmp/ci" >> $CIRRUS_ENV # Removed completely during cleanup. mkdir -p /private/tmp/ci + +# Add policy.json +mkdir -p $HOME/ci/.config/containers +cp pkg/machine/ocipull/policy.json /$HOME/ci/.config/containers/ + + +# Some test operations & checks require a git "identity" +# N/B: $HOME in this context does not include the /ci part automatically +# (see above) but it will when the next Cirrus-CI "_script" section +# takes over. +git config --file "$HOME/ci/.gitconfig" \ + --add safe.directory $CIRRUS_WORKING_DIR diff --git a/contrib/cirrus/pr-should-include-tests b/contrib/cirrus/pr-should-include-tests index 7c76cb90fa..bb9626ac13 100755 --- a/contrib/cirrus/pr-should-include-tests +++ b/contrib/cirrus/pr-should-include-tests @@ -2,17 +2,16 @@ # # Intended for use in CI: check git commits, barf if no tests added. # +ME=$(basename $0) + +# Github label which allows overriding this check +OVERRIDE_LABEL="No New Tests" # Docs-only changes are excused if [[ "${CIRRUS_CHANGE_TITLE}" =~ CI:DOCS ]]; then exit 0 fi -# So are PRs where 'NO NEW TESTS NEEDED' appears in the Github message -if [[ "${CIRRUS_CHANGE_MESSAGE}" =~ NO.NEW.TESTS.NEEDED ]]; then - exit 0 -fi - # HEAD should be good enough, but the CIRRUS envariable allows us to test head=${CIRRUS_CHANGE_IN_REPO:-HEAD} # Base of this PR. Here we absolutely rely on cirrus. @@ -38,6 +37,7 @@ filtered_changes=$(git diff --name-only $base $head | grep -F -vx go.sum | grep -F -vx podman.spec.rpkg | grep -F -vx .golangci.yml | + grep -F -vx winmake.ps1 | grep -E -v '/*Makefile$' | grep -E -v '^[^/]+\.md$' | grep -E -v '^.github' | @@ -51,14 +51,41 @@ if [[ -z "$filtered_changes" ]]; then exit 0 fi -# One last chance: perhaps the developer included the magic '[NO NEW TESTS NEEDED]' -# string in an amended commit. -if git log --format=%B ${base}..${head} | grep -F '[NO NEW TESTS NEEDED]'; then - exit 0 +# Nope. Only allow if the github 'no-tests-needed' label is set +if [[ -z "$CIRRUS_PR" ]]; then + echo "$ME: cannot query github: \$CIRRUS_PR is undefined" >&2 + exit 1 +fi +if [[ -z "$CIRRUS_REPO_CLONE_TOKEN" ]]; then + echo "$ME: cannot query github: \$CIRRUS_REPO_CLONE_TOKEN is undefined" >&2 + exit 1 +fi + +query="{ + \"query\": \"query { + repository(owner: \\\"containers\\\", name: \\\"podman\\\") { + pullRequest(number: $CIRRUS_PR) { + labels(first: 100) { + nodes { + name + } + } + } + } +}\" +}" + +result=$(curl -s -H "Authorization: bearer $CIRRUS_REPO_CLONE_TOKEN" -H "Accept: application/vnd.github.antiope-preview+json" -H "Content-Type: application/json" -X POST --data @- https://api.github.com/graphql <<<"$query") + +labels=$(jq -r '.data.repository.pullRequest.labels.nodes[].name' <<<"$result") + +if grep -F -x -q "$OVERRIDE_LABEL" <<<"$labels"; then + # PR has the label set + exit 0 fi cat <&2 + exit 1 +fi +export CIRRUS_REPO_CLONE_TOKEN="$GITHUB_TOKEN" + ############################################################################### # BEGIN test cases # @@ -26,19 +33,19 @@ ME=$(basename $0) # commit history, but once we do, please add a new '0' test here. 
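# (Editor's illustrative note, not part of this harness: the label override in
# pr-should-include-tests above expects the GitHub GraphQL API to answer with a
# JSON document shaped roughly like
#     {"data":{"repository":{"pullRequest":{"labels":{"nodes":[{"name":"No New Tests"}]}}}}}
# which is why the script filters the response with
#     jq -r '.data.repository.pullRequest.labels.nodes[].name'
# to obtain one label name per line before the `grep -F -x -q "$OVERRIDE_LABEL"` check.)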
# tests=" -0 68c9e02df db71759b1 PR 8821: multiple commits, includes tests -0 bb82c37b7 eeb4c129b PR 8832: single commit, w/tests, merge-base test -1 1f5927699 864592c74 PR 8685, multiple commits, no tests -0 7592f8fbb 6bbe54f2b PR 8766, no tests, but CI:DOCS in commit message -0 355e38769 bfbd915d6 PR 8884, a vendor bump -0 ffe2b1e95 e467400eb PR 8899, only .cirrus.yml -0 06a6fd9f2 3cc080151 PR 8695, docs-only, without CI:DOCS -0 a47515008 ecedda63a PR 8816, unit tests only -0 caa84cd35 e55320efd PR 8565, hack/podman-socat only -0 c342583da 12f835d12 PR 8523, version.go + podman.spec.in -0 8f75ed958 7b3ad6d89 PR 8835, only a README.md change -0 b6db60e58 f06dd45e0 PR 9420, a test rename -0 c6a896b0c 4ea5d6971 PR 11833, includes magic string +0 68c9e02df db71759b1 8821 multiple commits, includes tests +0 bb82c37b7 eeb4c129b 8832 single commit, w/tests, merge-base test +1 1f5927699 864592c74 8685 multiple commits, no tests +0 7592f8fbb 6bbe54f2b 8766 no tests, but CI:DOCS in commit message +0 355e38769 bfbd915d6 8884 a vendor bump +0 ffe2b1e95 e467400eb 8899 only .cirrus.yml +0 06a6fd9f2 3cc080151 8695 docs-only, without CI:DOCS +0 a47515008 ecedda63a 8816 unit tests only +0 caa84cd35 e55320efd 8565 hack/podman-socat only +0 c342583da 12f835d12 8523 version.go + podman.spec.in +0 8f75ed958 7b3ad6d89 8835 only a README.md change +0 b6db60e58 f06dd45e0 9420 a test rename +0 c6a896b0c 4ea5d6971 11833 includes magic string " # The script we're testing @@ -76,10 +83,10 @@ function run_test_script() { echo "# Actual: $output" rc=1 else - echo "ok $testnum $testname" + echo "ok $testnum $testname - rc=$expected_rc" fi else - echo "ok $testnum $testname" + echo "ok $testnum $testname - rc=$expected_rc" fi fi @@ -97,15 +104,6 @@ function run_test_script() { echo "ok $testnum $rest (override with CI:DOCS)" fi - testnum=$(( testnum + 1 )) - CIRRUS_CHANGE_MESSAGE="hi there [NO TESTS NEEDED] bye" $test_script &>/dev/null - if [[ $? 
-ne 0 ]]; then - echo "not ok $testnum $rest (override with '[NO TESTS NEEDED]')" - rc=1 - else - echo "ok $testnum $rest (override with '[NO TESTS NEEDED]')" - fi - tested_override=1 fi fi @@ -119,7 +117,7 @@ rc=0 testnum=0 tested_override= -while read expected_rc parent_sha commit_sha rest; do +while read expected_rc parent_sha commit_sha pr rest; do # Skip blank lines test -z "$expected_rc" && continue @@ -127,8 +125,9 @@ while read expected_rc parent_sha commit_sha rest; do export CIRRUS_CHANGE_IN_REPO=$commit_sha export CIRRUS_CHANGE_TITLE=$(git log -1 --format=%s $commit_sha) export CIRRUS_CHANGE_MESSAGE= + export CIRRUS_PR=$pr - run_test_script $expected_rc "$rest" + run_test_script $expected_rc "PR $pr - $rest" done <<<"$tests" echo "1..$testnum" diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh index b2875680c9..e24ef73761 100755 --- a/contrib/cirrus/runner.sh +++ b/contrib/cirrus/runner.sh @@ -21,19 +21,11 @@ source $(dirname $0)/lib.sh showrun echo "starting" -function _run_validate() { - # TODO: aarch64 images need python3-devel installed - # https://github.com/containers/automation_images/issues/159 - showrun bigto ooe.sh dnf install -y python3-devel - - # git-validation tool fails if $EPOCH_TEST_COMMIT is empty - # shellcheck disable=SC2154 - if [[ -n "$EPOCH_TEST_COMMIT" ]]; then - showrun make validate - else - warn "Skipping git-validation since \$EPOCH_TEST_COMMIT is empty" - fi +function _run_validate-source() { + showrun make validate-source + # make sure PRs have tests + showrun make tests-included } function _run_unit() { @@ -57,12 +49,6 @@ function _run_apiv2() { ) |& logformatter } -function _run_compose() { - _bail_if_test_can_be_skipped test/compose - - showrun ./test/compose/test-compose |& logformatter -} - function _run_compose_v2() { _bail_if_test_can_be_skipped test/compose @@ -70,14 +56,10 @@ function _run_compose_v2() { } function _run_int() { - _bail_if_test_can_be_skipped test/e2e - dotest integration } function _run_sys() { - _bail_if_test_can_be_skipped test/system - dotest system } @@ -88,8 +70,6 @@ function _run_upgrade_test() { } function _run_bud() { - _bail_if_test_can_be_skipped test/buildah-bud - showrun ./test/buildah-bud/run-buildah-bud-tests |& logformatter } @@ -97,9 +77,6 @@ function _run_bindings() { # install ginkgo showrun make .install.ginkgo - # shellcheck disable=SC2155 - export PATH=$PATH:$GOSRC/hack:$GOSRC/test/tools/build - # if logformatter sees this, it can link directly to failing source lines local gitcommit_magic= if [[ -n "$GIT_COMMIT" ]]; then @@ -155,7 +132,8 @@ exec_container() { set -x # shellcheck disable=SC2154 exec bin/podman run --rm --privileged --net=host --cgroupns=host \ - -v `mktemp -d -p /var/tmp`:/tmp:Z \ + -v `mktemp -d -p /var/tmp`:/var/tmp:Z \ + --tmpfs /tmp:mode=1777 \ -v /dev/fuse:/dev/fuse \ -v "$GOPATH:$GOPATH:Z" \ --workdir "$GOSRC" \ @@ -171,9 +149,6 @@ function _run_swagger() { local envvarsfile req_env_vars GCPJSON GCPNAME GCPPROJECT CTR_FQIN - [[ -x /usr/local/bin/swagger ]] || \ - die "Expecting swagger binary to be present and executable." 
- # The filename and bucket depend on the automation context #shellcheck disable=SC2154,SC2153 if [[ -n "$CIRRUS_PR" ]]; then @@ -226,10 +201,18 @@ eof } function _run_build() { + local vb_target + + # There's no reason to validate-binaries across multiple linux platforms + # shellcheck disable=SC2154 + if [[ "$DISTRO_NV" =~ $FEDORA_NAME ]]; then + vb_target=validate-binaries + fi + # Ensure always start from clean-slate with all vendor modules downloaded showrun make clean showrun make vendor - showrun make podman-release # includes podman, podman-remote, and docs + showrun make podman-release $vb_target # includes podman, podman-remote, and docs # Last-minute confirmation that we're testing the desired runtime. # This Can't Possibly Fail™ in regular CI; only when updating VMs. @@ -260,6 +243,10 @@ function _run_altbuild() { cd $GOSRC case "$ALT_NAME" in *Each*) + if [[ -z "$CIRRUS_PR" ]]; then + echo ".....only meaningful on PRs" + return + fi showrun git fetch origin # The make-and-check-size script, introduced 2022-03-22 in #13518, # runs 'make' (the original purpose of this check) against @@ -271,7 +258,8 @@ function _run_altbuild() { context_dir=$(mktemp -d --tmpdir make-size-check.XXXXXXX) savedhead=$(git rev-parse HEAD) # Push to PR base. First run of the script will write size files - pr_base=$(git merge-base origin/$DEST_BRANCH HEAD) + # shellcheck disable=SC2154 + pr_base=$PR_BASE_SHA showrun git checkout $pr_base showrun hack/make-and-check-size $context_dir # pop back to PR, and run incremental makes. Subsequent script @@ -281,7 +269,8 @@ function _run_altbuild() { rm -rf $context_dir ;; *Windows*) - showrun make lint GOOS=windows || true # TODO: Enable when code passes check + showrun make .install.pre-commit + showrun make lint GOOS=windows CGO_ENABLED=0 showrun make podman-remote-release-windows_amd64.zip ;; *RPM*) @@ -421,12 +410,27 @@ dotest() { die "Found fallback podman '$fallback_podman' in \$PATH; tests require none, as a guarantee that we're testing the right binary." fi + # Catch invalid "TMPDIR == /tmp" assumptions; PR #19281 + TMPDIR=$(mktemp --tmpdir -d CI_XXXX) + # tmp dir is commonly 1777 to allow all user to read/write + chmod 1777 $TMPDIR + export TMPDIR + fstype=$(findmnt -n -o FSTYPE --target $TMPDIR) + if [[ "$fstype" != "tmpfs" ]]; then + die "The CI test TMPDIR is not on a tmpfs mount, we need tmpfs to make the tests faster" + fi + showrun make ${localremote}${testsuite} PODMAN_SERVER_LOG=$PODMAN_SERVER_LOG \ |& logformatter + + # FIXME: https://github.com/containers/podman/issues/22642 + # Cannot delete this due cleanup errors, as the VM is basically + # done after this anyway let's not block on this for now. 
+ # rm -rf $TMPDIR + # unset TMPDIR } _run_machine-linux() { - # N/B: Can't use _bail_if_test_can_be_skipped here b/c content isn't under test/ showrun make localmachine |& logformatter } @@ -453,7 +457,9 @@ function _bail_if_test_can_be_skipped() { # Defined by Cirrus-CI for all tasks # shellcheck disable=SC2154 head=$CIRRUS_CHANGE_IN_REPO - base=$(git merge-base $DEST_BRANCH $head) + # shellcheck disable=SC2154 + base=$PR_BASE_SHA + echo "_bail_if_test_can_be_skipped: head=$head base=$base" diffs=$(git diff --name-only $base $head) # If PR touches any files in an argument directory, we cannot skip diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index bf895be73a..f703ab32eb 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -38,14 +38,8 @@ do fi done -cp hack/podman-registry /bin - -# Some test operations & checks require a git "identity" -_gc='git config --file /root/.gitconfig' -showrun $_gc user.email "TMcTestFace@example.com" -showrun $_gc user.name "Testy McTestface" # Bypass git safety/security checks when operating in a throwaway environment -showrun git config --system --add safe.directory $GOSRC +showrun git config --global --add safe.directory $GOSRC # Ensure that all lower-level contexts and child-processes have # ready access to higher level orchestration (e.g Cirrus-CI) @@ -80,24 +74,9 @@ cd "${GOSRC}/" mkdir -p /etc/containers/containers.conf.d -# Defined by lib.sh: Does the host support cgroups v1 or v2? Use runc or crun -# respectively. -# **IMPORTANT**: $OCI_RUNTIME is a fakeout! It is used only in e2e tests. -# For actual podman, as in system tests, we force runtime in containers.conf -showrun echo "conditional check: CG_FS_TYPE [=$CG_FS_TYPE]" -case "$CG_FS_TYPE" in - tmpfs) - if ((CONTAINER==0)); then - warn "Forcing testing with runc instead of crun" - echo "OCI_RUNTIME=runc" >> /etc/ci_environment - printf "[engine]\nruntime=\"runc\"\n" > /etc/containers/containers.conf.d/90-runtime.conf - fi - ;; - cgroup2fs) - # Nothing to do: podman defaults to crun - ;; - *) die_unknown CG_FS_TYPE -esac +# Only cgroups v2 is supported, die if anything else. +[[ "$CG_FS_TYPE" == "cgroup2fs" ]] || \ + die "Only cgroups v2 CI VMs are supported, not: '$CG_FS_TYPE'" # For testing boltdb without having to use --db-backend. # As of #20318 (2023-10-10) sqlite is the default, so do not create @@ -138,12 +117,7 @@ fi # Which distribution are we testing on. case "$OS_RELEASE_ID" in debian) - showrun echo "more conditional setup for debian" - # FIXME 2023-04-11: workaround for runc regression causing failure - # in system tests: "skipping device /dev/char/10:200 for systemd" - # (Checked on 2023-08-08 and it's still too old: 1.1.5) - # FIXME: please remove this once runc >= 1.2 makes it into debian. - showrun modprobe tun + showrun echo "No-op conditional setup for debian" ;; fedora) showrun echo "conditional setup for fedora" @@ -193,10 +167,10 @@ runroot = "/run/containers/storage" graphroot = "/var/lib/containers/storage" EOF -# Since we've potentially changed important config settings, reset. -# This prevents `database graph driver "" does not match "overlay"` -# on Debian. 
-rm -rf /var/lib/containers/storage + +# mount a tmpfs for the container storage to speed up the IO +# side effect is we clear all potentially pre existing data so we know we always start "clean" +mount -t tmpfs -o size=75%,mode=0700 none /var/lib/containers # shellcheck disable=SC2154 showrun echo "Setting CI_DESIRED_STORAGE [=$CI_DESIRED_STORAGE] for *e2e* tests" @@ -208,6 +182,7 @@ showrun echo "about to set up for TEST_ENVIRON [=$TEST_ENVIRON]" case "$TEST_ENVIRON" in host) # The e2e tests wrongly guess `--cgroup-manager` option + # under some runtime contexts like rootless. # shellcheck disable=SC2154 if [[ "$CG_FS_TYPE" == "cgroup2fs" ]] || [[ "$PRIV_NAME" == "root" ]] then @@ -331,10 +306,8 @@ esac # shellcheck disable=SC2154 showrun echo "about to set up for TEST_FLAVOR [=$TEST_FLAVOR]" case "$TEST_FLAVOR" in - validate) - showrun dnf install -y $PACKAGE_DOWNLOAD_DIR/python3*.rpm - # For some reason, this is also needed for validation - showrun make .install.pre-commit .install.gitvalidation + validate-source) + # NOOP ;; altbuild) # Defined in .cirrus.yml @@ -347,8 +320,6 @@ case "$TEST_FLAVOR" in remove_packaged_podman_files showrun make install PREFIX=/usr ETCDIR=/etc - msg "Installing previously downloaded/cached packages" - showrun dnf install -y $PACKAGE_DOWNLOAD_DIR/python3*.rpm virtualenv .venv/docker-py source .venv/docker-py/bin/activate showrun pip install --upgrade pip @@ -371,15 +342,16 @@ case "$TEST_FLAVOR" in showrun pip install --upgrade pip showrun pip install --requirement $GOSRC/test/apiv2/python/requirements.txt ;& # continue with next item - compose) - showrun make install.tools - showrun dnf remove -y gvisor-tap-vsock - showrun dnf install -y podman-docker* - ;& # continue with next item int) showrun make .install.ginkgo ;& - sys) ;& + sys) + # when run nighlty check for system test leaks + # shellcheck disable=SC2154 + if [[ "$CIRRUS_CRON" != '' ]]; then + export PODMAN_BATS_LEAK_CHECK=1 + fi + ;& upgrade_test) ;& bud) ;& bindings) ;& @@ -421,7 +393,12 @@ case "$TEST_FLAVOR" in install_test_configs ;; machine-linux) - showrun dnf install -y podman-gvproxy* + showrun dnf install -y podman-gvproxy* virtiofsd + # Bootstrap this link if it isn't yet in the package; xref + # https://github.com/containers/podman/pull/22920 + if ! test -L /usr/libexec/podman/virtiofsd; then + showrun ln -sfr /usr/libexec/virtiofsd /usr/libexec/podman/virtiofsd + fi remove_packaged_podman_files showrun make install PREFIX=/usr ETCDIR=/etc install_test_configs diff --git a/contrib/cirrus/win-installer-main.ps1 b/contrib/cirrus/win-installer-main.ps1 index 8bf6764ac3..c951a01f68 100644 --- a/contrib/cirrus/win-installer-main.ps1 +++ b/contrib/cirrus/win-installer-main.ps1 @@ -2,22 +2,59 @@ . $PSScriptRoot\win-lib.ps1 -Set-Location "$ENV:CIRRUS_WORKING_DIR\repo\contrib\win-installer" +if ($Env:CI -eq "true") { + $WIN_INST_FOLDER = "$ENV:CIRRUS_WORKING_DIR\repo\contrib\win-installer" + $RELEASE_DIR = "$ENV:CIRRUS_WORKING_DIR\repo" +} else { + $WIN_INST_FOLDER = "$PSScriptRoot\..\win-installer" + $ENV:WIN_INST_VER = "9.9.9" + $RELEASE_DIR = "$PSScriptRoot\..\.." 
+ $ENV:CONTAINERS_MACHINE_PROVIDER = "wsl" +} + +$ConfFilePath = "$env:ProgramData\containers\containers.conf.d\99-podman-machine-provider.conf" +$WindowsPathsToTest = @("C:\Program Files\RedHat\Podman\podman.exe", + "C:\Program Files\RedHat\Podman\win-sshproxy.exe", + "$ConfFilePath", + "HKLM:\SOFTWARE\Red Hat\Podman") + +Set-Location $WIN_INST_FOLDER # Build Installer # Note: consumes podman-remote-release-windows_amd64.zip from repo.tbz2 -Run-Command ".\build.ps1 $Env:WIN_INST_VER dev `"$ENV:CIRRUS_WORKING_DIR\repo`"" +Run-Command ".\build.ps1 $Env:WIN_INST_VER dev `"$RELEASE_DIR`"" -# Run the installer silently and WSL install option disabled (prevent reboots, wsl requirements) +# Run the installer silently and WSL/HyperV install options disabled (prevent reboots) # We need AllowOldWin=1 for server 2019 (cirrus image), can be dropped after server 2022 -$ret = Start-Process -Wait -PassThru ".\podman-${ENV:WIN_INST_VER}-dev-setup.exe" -ArgumentList "/install /quiet WSLCheckbox=0 AllowOldWin=1 /log inst.log" +$ret = Start-Process -Wait -PassThru ".\podman-${ENV:WIN_INST_VER}-dev-setup.exe" -ArgumentList "/install /quiet MachineProvider=$ENV:CONTAINERS_MACHINE_PROVIDER WSLCheckbox=0 HyperVCheckbox=0 AllowOldWin=1 /log inst.log" if ($ret.ExitCode -ne 0) { Write-Host "Install failed, dumping log" Get-Content inst.log throw "Exit code is $($ret.ExitCode)" } -if (! ((Test-Path -Path "C:\Program Files\RedHat\Podman\podman.exe") -and ` - (Test-Path -Path "C:\Program Files\RedHat\Podman\win-sshproxy.exe"))) { - throw "Expected podman.exe and win-sshproxy.exe, one or both not present after install" +$WindowsPathsToTest | ForEach-Object { + if (! (Test-Path -Path $_) ) { + throw "Expected $_ but it's not present after uninstall" + } } +$machineProvider = Get-Content $ConfFilePath | Select-Object -Skip 1 | ConvertFrom-StringData | ForEach-Object { $_.provider } +if ( $machineProvider -ne "`"$ENV:CONTAINERS_MACHINE_PROVIDER`"" ) { + throw "Expected `"$ENV:CONTAINERS_MACHINE_PROVIDER`" as default machine provider but got $machineProvider" +} + Write-Host "Installer verification successful!" + +# Run the uninstaller silently to verify that it cleans up properly +$ret = Start-Process -Wait -PassThru ".\podman-${ENV:WIN_INST_VER}-dev-setup.exe" -ArgumentList "/uninstall /quiet /log uninst.log" +if ($ret.ExitCode -ne 0) { + Write-Host "Uninstall failed, dumping log" + Get-Content uninst.log + throw "Exit code is $($ret.ExitCode)" +} +$WindowsPathsToTest | ForEach-Object { + if ( Test-Path -Path $_ ) { + throw "Path $_ is still present after uninstall" + } +} + +Write-Host "Uninstaller verification successful!" 
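Editor's note (illustrative, not part of the patch): the $ConfFilePath verification above assumes the installer drops a small containers.conf override, presumably a `[machine]` section header followed by a quoted `provider="wsl"` line; the exact file layout is an assumption. A minimal PowerShell sketch of the same parse, with the file contents stood in by an inline array:

    # Hypothetical stand-in for the drop-in the installer is expected to write
    $assumedConf = @('[machine]', 'provider="wsl"')
    # Skip the section header, parse the key=value line, pull out the provider
    $provider = $assumedConf | Select-Object -Skip 1 | ConvertFrom-StringData |
        ForEach-Object { $_.provider }
    # ConvertFrom-StringData keeps the surrounding quotes, so $provider is '"wsl"';
    # that is why the installer check compares against "`"$ENV:CONTAINERS_MACHINE_PROVIDER`""
    # rather than against the bare environment value.
    Write-Host "parsed machine provider: $provider"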
diff --git a/contrib/cirrus/win-lib.ps1 b/contrib/cirrus/win-lib.ps1 index ce22a69e74..9b61da1378 100644 --- a/contrib/cirrus/win-lib.ps1 +++ b/contrib/cirrus/win-lib.ps1 @@ -40,6 +40,28 @@ if ($Env:CI -eq "true") { Remove-Item Env:\CIRRUS_PR_BODY -ErrorAction:Ignore } +function Invoke-Logformatter { + param ( + [Collections.ArrayList] $unformattedLog + ) + + Write-Host "Invoking Logformatter" + $logFormatterInput = @('/define.gitCommit=' + $(git rev-parse HEAD)) + $unformattedLog + $logformatterPath = "$PSScriptRoot\logformatter" + if ($Env:TEST_FLAVOR) { + $logformatterArg = "$Env:TEST_FLAVOR-podman-windows-rootless-host-sqlite" + } else { + $logformatterArg = "podman-windows-rootless-host-sqlite" + } + $null = $logFormatterInput | perl $logformatterPath $logformatterArg + $logformatterGeneratedFile = "$logformatterArg.log.html" + if (Test-Path $logformatterGeneratedFile) { + Move-Item $logformatterGeneratedFile .. -Force + } else { + Write-Host "Logformatter did not generate the expected file: $logformatterGeneratedFile" + } +} + # Non-powershell commands do not halt execution on error! This helper # should be called after every critical operation to check and halt on a # non-zero exit code. Be careful not to use this for powershell commandlets @@ -48,14 +70,14 @@ if ($Env:CI -eq "true") { function Check-Exit { param ( [int] $stackPos = 1, - [string] $command = 'command' + [string] $command = 'command', + [string] $exitCode = $LASTEXITCODE # WARNING: might not be a number! ) - $result = $LASTEXITCODE # WARNING: might not be a number! - if ( ($result -ne $null) -and ($result -ne 0) ) { + if ( ($exitCode -ne $null) -and ($exitCode -ne 0) ) { # https://learn.microsoft.com/en-us/dotnet/api/system.management.automation.callstackframe $caller = (Get-PSCallStack)[$stackPos] - throw "Exit code = '$result' running $command at $($caller.ScriptName):$($caller.ScriptLineNumber)" + throw "Exit code = '$exitCode' running $command at $($caller.ScriptName):$($caller.ScriptLineNumber)" } } @@ -71,6 +93,18 @@ function Run-Command { Write-Host $command - Invoke-Expression $command - Check-Exit 2 "'$command'" + # The command output is saved into the variable $unformattedLog to be + # processed by `logformatter` later. The alternative is to redirect the + # command output to logformatter using a pipeline (`|`). But this approach + # doesn't work as the command exit code would be overridden by logformatter. + # It isn't possible to get a behavior of bash `pipefail` on Windows. + Invoke-Expression $command -OutVariable unformattedLog | Write-Output + + $exitCode = $LASTEXITCODE + + if ($Env:CIRRUS_CI -eq "true") { + Invoke-Logformatter $unformattedLog + } + + Check-Exit 2 "'$command'" "$exitCode" } diff --git a/contrib/cirrus/win-podman-machine-test.ps1 b/contrib/cirrus/win-podman-machine-test.ps1 index 5833d5bcc6..4d8ff59667 100644 --- a/contrib/cirrus/win-podman-machine-test.ps1 +++ b/contrib/cirrus/win-podman-machine-test.ps1 @@ -29,5 +29,9 @@ Set-Location "$ENV:CIRRUS_WORKING_DIR\repo" # Tests hard-code this location for podman-remote binary, make sure it actually runs. 
Run-Command ".\bin\windows\podman.exe --version" +# Add policy.json to filesystem for podman machine pulls +New-Item -ItemType "directory" -Path "$env:AppData\containers" +Copy-Item -Path pkg\machine\ocipull\policy.json -Destination "$env:AppData\containers" + Write-Host "`nRunning podman-machine e2e tests" Run-Command ".\winmake localmachine" diff --git a/contrib/pkginstaller/.gitignore b/contrib/pkginstaller/.gitignore index 5e597ab071..fb6313afb6 100644 --- a/contrib/pkginstaller/.gitignore +++ b/contrib/pkginstaller/.gitignore @@ -1,6 +1,6 @@ out Distribution welcome.html -tmp-download +tmp-bin .vscode root diff --git a/contrib/pkginstaller/Makefile b/contrib/pkginstaller/Makefile index c335dc194c..b9ac4b44ec 100644 --- a/contrib/pkginstaller/Makefile +++ b/contrib/pkginstaller/Makefile @@ -8,25 +8,31 @@ else endif GVPROXY_VERSION ?= 0.7.3 VFKIT_VERSION ?= 0.5.1 +KRUNKIT_VERSION ?= 0.1.1 GVPROXY_RELEASE_URL ?= https://github.com/containers/gvisor-tap-vsock/releases/download/v$(GVPROXY_VERSION)/gvproxy-darwin VFKIT_RELEASE_URL ?= https://github.com/crc-org/vfkit/releases/download/v$(VFKIT_VERSION)/vfkit-unsigned +KRUNKIT_RELEASE_URL ?= https://github.com/containers/krunkit/releases/download/v$(KRUNKIT_VERSION)/krunkit-podman-unsigned-$(KRUNKIT_VERSION).tgz PACKAGE_DIR ?= out/packaging -TMP_DOWNLOAD ?= tmp-download +TMP_BIN ?= tmp-bin PACKAGE_ROOT ?= root PKG_NAME := podman-installer-macos-$(GOARCH).pkg default: pkginstaller podman_version: - make -C ../../ test/version/version + make -B -C ../../ test/version/version -$(TMP_DOWNLOAD)/gvproxy: - mkdir -p $(TMP_DOWNLOAD) - cd $(TMP_DOWNLOAD) && curl -sLo gvproxy $(GVPROXY_RELEASE_URL) +$(TMP_BIN)/gvproxy: + mkdir -p $(TMP_BIN) + cd $(TMP_BIN) && curl -sLo gvproxy $(GVPROXY_RELEASE_URL) -$(TMP_DOWNLOAD)/vfkit: - mkdir -p $(TMP_DOWNLOAD) - cd $(TMP_DOWNLOAD) && curl -sLo vfkit $(VFKIT_RELEASE_URL) +$(TMP_BIN)/vfkit: + mkdir -p $(TMP_BIN) + cd $(TMP_BIN) && curl -sLo vfkit $(VFKIT_RELEASE_URL) + +$(TMP_BIN)/krunkit.tgz: + mkdir -p $(TMP_BIN) + cd $(TMP_BIN) && curl -sLo krunkit.tgz $(KRUNKIT_RELEASE_URL) packagedir: podman_version package_root Distribution welcome.html mkdir -p $(PACKAGE_DIR) @@ -40,13 +46,18 @@ packagedir: podman_version package_root Distribution welcome.html ../../test/version/version > $(PACKAGE_DIR)/VERSION echo -n $(ARCH) > $(PACKAGE_DIR)/ARCH cp ../../LICENSE $(PACKAGE_DIR)/Resources/LICENSE.txt - cp hvf.entitlements $(PACKAGE_DIR)/ + cp vfkit.entitlements $(PACKAGE_DIR)/ + cp krunkit.entitlements $(PACKAGE_DIR)/ -package_root: clean-pkgroot $(TMP_DOWNLOAD)/gvproxy $(TMP_DOWNLOAD)/vfkit +package_root: clean-pkgroot $(TMP_BIN)/gvproxy $(TMP_BIN)/vfkit $(TMP_BIN)/krunkit.tgz mkdir -p $(PACKAGE_ROOT)/podman/bin - cp $(TMP_DOWNLOAD)/gvproxy $(PACKAGE_ROOT)/podman/bin/ - cp $(TMP_DOWNLOAD)/vfkit $(PACKAGE_ROOT)/podman/bin/ + cp $(TMP_BIN)/gvproxy $(PACKAGE_ROOT)/podman/bin/ + cp $(TMP_BIN)/vfkit $(PACKAGE_ROOT)/podman/bin/ + tar xf $(TMP_BIN)/krunkit.tgz -C $(PACKAGE_ROOT)/podman chmod a+x $(PACKAGE_ROOT)/podman/bin/* + # Leaving for future considerations + # mkdir $(PACKAGE_ROOT)/podman/config + # cp ../../pkg/machine/ocipull/policy.json $(PACKAGE_ROOT)/podman/config/policy.json %: %.in podman_version @sed -e 's/__VERSION__/'$(shell ../../test/version/version)'/g' $< >$@ @@ -62,7 +73,7 @@ notarize: _notarize .PHONY: clean clean-pkgroot clean: - rm -rf $(TMP_DOWNLOAD) $(PACKAGE_ROOT) $(PACKAGE_DIR) Distribution welcome.html ../../test/version/version + rm -rf $(TMP_BIN) $(PACKAGE_ROOT) $(PACKAGE_DIR) out Distribution 
welcome.html ../../test/version/version clean-pkgroot: rm -rf $(PACKAGE_ROOT) $(PACKAGE_DIR) Distribution welcome.html diff --git a/contrib/pkginstaller/README.md b/contrib/pkginstaller/README.md index ff16629cc7..c33bcd77e0 100644 --- a/contrib/pkginstaller/README.md +++ b/contrib/pkginstaller/README.md @@ -1,13 +1,13 @@ ## How to build ```sh -$ make ARCH= NO_CODESIGN=1 pkginstaller +$ make ARCH= NO_CODESIGN=1 pkginstaller # or to create signed pkg -$ make ARCH= CODESIGN_IDENTITY= PRODUCTSIGN_IDENTITY= pkginstaller +$ make ARCH= CODESIGN_IDENTITY= PRODUCTSIGN_IDENTITY= pkginstaller # or to prepare a signed and notarized pkg for release -$ make ARCH= CODESIGN_IDENTITY= PRODUCTSIGN_IDENTITY= NOTARIZE_USERNAME= NOTARIZE_PASSWORD= NOTARIZE_TEAM= notarize +$ make ARCH= CODESIGN_IDENTITY= PRODUCTSIGN_IDENTITY= NOTARIZE_USERNAME= NOTARIZE_PASSWORD= NOTARIZE_TEAM= notarize ``` The generated pkg will be written to `out/podman-macos-installer-*.pkg`. diff --git a/contrib/pkginstaller/hvf.entitlements b/contrib/pkginstaller/hvf.entitlements deleted file mode 100644 index 154f3308ef..0000000000 --- a/contrib/pkginstaller/hvf.entitlements +++ /dev/null @@ -1,8 +0,0 @@ - - - - - com.apple.security.hypervisor - - - diff --git a/contrib/pkginstaller/krunkit.entitlements b/contrib/pkginstaller/krunkit.entitlements new file mode 100644 index 0000000000..a967593e0e --- /dev/null +++ b/contrib/pkginstaller/krunkit.entitlements @@ -0,0 +1,10 @@ + + + + + com.apple.security.hypervisor + + com.apple.security.cs.disable-library-validationr + + + diff --git a/contrib/pkginstaller/package.sh b/contrib/pkginstaller/package.sh index 6c75ca562d..4d2cd7cd48 100755 --- a/contrib/pkginstaller/package.sh +++ b/contrib/pkginstaller/package.sh @@ -4,35 +4,72 @@ set -euxo pipefail BASEDIR=$(dirname "$0") OUTPUT=$1 -CODESIGN_IDENTITY=${CODESIGN_IDENTITY:-mock} +CODESIGN_IDENTITY=${CODESIGN_IDENTITY:--} PRODUCTSIGN_IDENTITY=${PRODUCTSIGN_IDENTITY:-mock} NO_CODESIGN=${NO_CODESIGN:-0} HELPER_BINARIES_DIR="/opt/podman/bin" +MACHINE_POLICY_JSON_DIR="/opt/podman/config" + +tmpBin="contrib/pkginstaller/tmp-bin" binDir="${BASEDIR}/root/podman/bin" +libDir="${BASEDIR}/root/podman/lib" version=$(cat "${BASEDIR}/VERSION") arch=$(cat "${BASEDIR}/ARCH") function build_podman() { pushd "$1" - make GOARCH="${goArch}" podman-remote HELPER_BINARIES_DIR="${HELPER_BINARIES_DIR}" - make GOARCH="${goArch}" podman-mac-helper - cp bin/darwin/podman "contrib/pkginstaller/out/packaging/${binDir}/podman" - cp bin/darwin/podman-mac-helper "contrib/pkginstaller/out/packaging/${binDir}/podman-mac-helper" + + case ${goArch} in + universal) + build_fat + cp "${tmpBin}/podman-universal" "contrib/pkginstaller/out/packaging/${binDir}/podman" + cp "${tmpBin}/podman-mac-helper-universal" "contrib/pkginstaller/out/packaging/${binDir}/podman-mac-helper" + ;; + + amd64 | arm64) + build_podman_arch ${goArch} + cp "${tmpBin}/podman-${goArch}" "contrib/pkginstaller/out/packaging/${binDir}/podman" + cp "${tmpBin}/podman-mac-helper-${goArch}" "contrib/pkginstaller/out/packaging/${binDir}/podman-mac-helper" + ;; + *) + echo -n "Unknown arch: ${goArch}" + ;; + esac + popd } +function build_podman_arch(){ + make -B GOARCH="$1" podman-remote HELPER_BINARIES_DIR="${HELPER_BINARIES_DIR}" + make -B GOARCH="$1" podman-mac-helper + mkdir -p "${tmpBin}" + cp bin/darwin/podman "${tmpBin}/podman-$1" + cp bin/darwin/podman-mac-helper "${tmpBin}/podman-mac-helper-$1" +} + +function build_fat(){ + echo "Building ARM Podman" + build_podman_arch "arm64" + echo "Building AMD Podman" + 
build_podman_arch "amd64" + + echo "Creating universal binary" + lipo -create -output "${tmpBin}/podman-universal" "${tmpBin}/podman-arm64" "${tmpBin}/podman-amd64" + lipo -create -output "${tmpBin}/podman-mac-helper-universal" "${tmpBin}/podman-mac-helper-arm64" "${tmpBin}/podman-mac-helper-amd64" +} + function sign() { - if [ "${NO_CODESIGN}" -eq "1" ]; then - return - fi local opts="" entitlements="${BASEDIR}/$(basename "$1").entitlements" if [ -f "${entitlements}" ]; then opts="--entitlements ${entitlements}" fi - codesign --deep --sign "${CODESIGN_IDENTITY}" --options runtime --timestamp --force ${opts} "$1" + if [ ! "${NO_CODESIGN}" -eq "1" ]; then + opts="$opts --options runtime" + fi + codesign --deep --sign "${CODESIGN_IDENTITY}" --timestamp --force ${opts} "$1" } goArch="${arch}" @@ -41,11 +78,18 @@ if [ "${goArch}" = aarch64 ]; then fi build_podman "../../../../" + sign "${binDir}/podman" sign "${binDir}/gvproxy" sign "${binDir}/vfkit" sign "${binDir}/podman-mac-helper" +sign "${binDir}/krunkit" +sign "${libDir}/libkrun-efi.dylib" +sign "${libDir}/libvirglrenderer.1.dylib" +sign "${libDir}/libepoxy.0.dylib" +sign "${libDir}/libMoltenVK.dylib" + pkgbuild --identifier com.redhat.podman --version "${version}" \ --scripts "${BASEDIR}/scripts" \ --root "${BASEDIR}/root" \ diff --git a/contrib/pkginstaller/vfkit.entitlements b/contrib/pkginstaller/vfkit.entitlements new file mode 100644 index 0000000000..3b2c941a9f --- /dev/null +++ b/contrib/pkginstaller/vfkit.entitlements @@ -0,0 +1,12 @@ + + + + + com.apple.security.network.server + + com.apple.security.network.client + + com.apple.security.virtualization + + + diff --git a/contrib/podmanimage/README.md b/contrib/podmanimage/README.md index 043af6d2dd..1e92bb7ac0 100644 --- a/contrib/podmanimage/README.md +++ b/contrib/podmanimage/README.md @@ -1,88 +1,2 @@ -[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***) -[comment]: <> () -[comment]: <> (ANY changes made to this file, once committed/merged must) -[comment]: <> (be manually copy/pasted -in markdown- into the description) -[comment]: <> (field on Quay at the following locations:) -[comment]: <> () -[comment]: <> (https://quay.io/repository/containers/podman) -[comment]: <> (https://quay.io/repository/podman/stable) -[comment]: <> (https://quay.io/repository/podman/testing) -[comment]: <> (https://quay.io/repository/podman/upstream) -[comment]: <> () -[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***) - -![PODMAN logo](https://raw.githubusercontent.com/containers/common/main/logos/podman-logo-full-vert.png) - -# podmanimage - -## Overview - -This directory contains the Containerfiles necessary to create the podmanimage container -images that are housed on quay.io under the Podman account. All repositories where -the images live are public and can be pulled without credentials. These container images are secured and the -resulting containers can run safely with privileges within the container. - -The container images are built using the latest Fedora and then Podman is installed into them. -The PATH in the container images is set to the default PATH provided by Fedora. Also, the -ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they -default to `/`. - -The container images are: - - * `quay.io/containers/podman:` and `quay.io/podman/stable:` - - These images are built daily. They are intended to contain an unchanging - and stable version of podman. 
For the most recent `` tags (`vX`, - `vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate - (especially) security upgrades. For build details, please [see the - configuration file](stable/Containerfile). - * `quay.io/containers/podman:latest` and `quay.io/podman/stable:latest` - - Built daily using the same Containerfile as above. The Podman version - will remain the "latest" available in Fedora, however the other image - contents may vary compared to the version-tagged images. - * `quay.io/podman/testing:latest` - This image is built daily, using the - latest version of Podman that was in the Fedora `updates-testing` repository. - The image is Built with [the testing Containerfile](testing/Containerfile). - * `quay.io/podman/upstream:latest` - This image is built daily using the latest - code found in this GitHub repository. Due to the image changing frequently, - it's not guaranteed to be stable or even executable. The image is built with - [the upstream Containerfile](upstream/Containerfile). Note the actual compilation - of upstream podman [occurs continuously in - COPR](https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/). - -## Sample Usage - - -``` -podman pull docker://quay.io/podman/stable:latest - -podman run --privileged stable podman version - -# Create a directory on the host to mount the container's -# /var/lib/container directory to so containers can be -# run within the container. -mkdir /var/lib/mycontainer - -# Run the image detached using the host's network in a container name -# podmanctr, turn off label and seccomp confinement in the container -# and then do a little shell hackery to keep the container up and running. -podman run --detach --name=podmanctr --net=host --security-opt label=disable --security-opt seccomp=unconfined --device /dev/fuse:rw -v /var/lib/mycontainer:/var/lib/containers:Z --privileged stable sh -c 'while true ;do sleep 100000 ; done' - -podman exec -it podmanctr /bin/sh - -# Now inside of the container - -podman pull alpine - -podman images - -exit -``` - -**Note:** If you encounter a `fuse: device not found` error when running the container image, it is likely that -the fuse kernel module has not been loaded on your host system. Use the command `modprobe fuse` to load the -module and then run the container image. To enable this automatically at boot time, you can add a configuration -file to `/etc/modules.load.d`. See `man modules-load.d` for more details. - -### Blog Post with Details - -Dan Walsh wrote a blog post on the [Enable Sysadmin](https://www.redhat.com/sysadmin/) site titled [How to use Podman inside of a container](https://www.redhat.com/sysadmin/podman-inside-container). In it, he details how to use these images as a rootful and as a rootless user. Please refer to this blog for more detailed information. +The podman container image build context and automation have been +moved to [https://github.com/containers/image_build/tree/main/podman](https://github.com/containers/image_build/tree/main/podman) diff --git a/contrib/podmanimage/stable/Containerfile b/contrib/podmanimage/stable/Containerfile deleted file mode 100644 index fa776ead17..0000000000 --- a/contrib/podmanimage/stable/Containerfile +++ /dev/null @@ -1,61 +0,0 @@ -# stable/Containerfile -# -# Build a Podman container image from the latest -# stable version of Podman on the Fedoras Updates System. 
-# https://bodhi.fedoraproject.org/updates/?search=podman -# This image can be used to create a secured container -# that runs safely with privileges within the container. -# -FROM registry.fedoraproject.org/fedora:latest - -# Don't include container-selinux and remove -# directories used by dnf that are just taking -# up space. -# TODO: rpm --setcaps... needed due to Fedora (base) image builds -# being (maybe still?) affected by -# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3 -RUN dnf -y update && \ - rpm --setcaps shadow-utils 2>/dev/null && \ - dnf -y install podman fuse-overlayfs openssh-clients \ - --exclude container-selinux && \ - dnf clean all && \ - rm -rf /var/cache /var/log/dnf* /var/log/yum.* - -RUN useradd podman; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid; - -ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable" -ADD $_REPO_URL/containers.conf /etc/containers/containers.conf -ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf - -RUN mkdir -p /home/podman/.local/share/containers && \ - chown podman:podman -R /home/podman && \ - chmod 644 /etc/containers/containers.conf - -# Copy & modify the defaults to provide reference if runtime changes needed. -# Changes here are required for running with fuse-overlay storage inside container. -RUN sed -e 's|^#mount_program|mount_program|g' \ - -e '/additionalimage.*/a "/var/lib/shared",' \ - -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \ - /usr/share/containers/storage.conf \ - > /etc/containers/storage.conf - -# Setup internal Podman to pass subscriptions down from host to internal container -RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf - -# Note VOLUME options must always happen after the chown call above -# RUN commands can not modify existing volumes -VOLUME /var/lib/containers -VOLUME /home/podman/.local/share/containers - -RUN mkdir -p /var/lib/shared/overlay-images \ - /var/lib/shared/overlay-layers \ - /var/lib/shared/vfs-images \ - /var/lib/shared/vfs-layers && \ - touch /var/lib/shared/overlay-images/images.lock && \ - touch /var/lib/shared/overlay-layers/layers.lock && \ - touch /var/lib/shared/vfs-images/images.lock && \ - touch /var/lib/shared/vfs-layers/layers.lock - -ENV _CONTAINERS_USERNS_CONFIGURED="" diff --git a/contrib/podmanimage/stable/README.md b/contrib/podmanimage/stable/README.md new file mode 100644 index 0000000000..1e92bb7ac0 --- /dev/null +++ b/contrib/podmanimage/stable/README.md @@ -0,0 +1,2 @@ +The podman container image build context and automation have been +moved to [https://github.com/containers/image_build/tree/main/podman](https://github.com/containers/image_build/tree/main/podman) diff --git a/contrib/podmanimage/stable/containers.conf b/contrib/podmanimage/stable/containers.conf deleted file mode 100644 index 220c1f850c..0000000000 --- a/contrib/podmanimage/stable/containers.conf +++ /dev/null @@ -1,12 +0,0 @@ -[containers] -netns="host" -userns="host" -ipcns="host" -utsns="host" -cgroupns="host" -cgroups="disabled" -log_driver = "k8s-file" -[engine] -cgroup_manager = "cgroupfs" -events_logger="file" -runtime="crun" diff --git a/contrib/podmanimage/stable/podman-containers.conf b/contrib/podmanimage/stable/podman-containers.conf deleted file mode 100644 index 2bdd95a3b5..0000000000 --- 
a/contrib/podmanimage/stable/podman-containers.conf +++ /dev/null @@ -1,5 +0,0 @@ -[containers] -volumes = [ - "/proc:/proc", -] -default_sysctls = [] diff --git a/contrib/podmanimage/testing/Containerfile b/contrib/podmanimage/testing/Containerfile deleted file mode 100644 index da9f740203..0000000000 --- a/contrib/podmanimage/testing/Containerfile +++ /dev/null @@ -1,66 +0,0 @@ -# testing/Containerfile -# -# Build a Podman container image from the latest -# stable version of Podman on the Fedoras Updates System. -# https://bodhi.fedoraproject.org/updates/?search=podman -# This image can be used to create a secured container -# that runs safely with privileges within the container. -# -FROM registry.fedoraproject.org/fedora:latest - -# Don't include container-selinux and remove -# directories used by dnf that are just taking -# up space. -# TODO: rpm --setcaps... needed due to Fedora (base) image builds -# being (maybe still?) affected by -# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3 -RUN dnf -y update && \ - rpm --setcaps shadow-utils 2>/dev/null && \ - dnf -y install podman fuse-overlayfs openssh-clients \ - --exclude container-selinux --enablerepo updates-testing && \ - dnf clean all && \ - rm -rf /var/cache /var/log/dnf* /var/log/yum.* - -RUN useradd podman; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid; - -ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable" -ADD $_REPO_URL/containers.conf /etc/containers/containers.conf -ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf - -RUN mkdir -p /home/podman/.local/share/containers && \ - chown podman:podman -R /home/podman - -# Copy & modify the defaults to provide reference if runtime changes needed. -# Changes here are required for running with fuse-overlay storage inside container. -RUN sed -e 's|^#mount_program|mount_program|g' \ - -e '/additionalimage.*/a "/var/lib/shared",' \ - -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \ - /usr/share/containers/storage.conf \ - > /etc/containers/storage.conf - -# Setup internal Podman to pass secrets/subscriptions down from host to internal container -RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf - -# Note VOLUME options must always happen after the chown call above -# RUN commands can not modify existing volumes -VOLUME /var/lib/containers -VOLUME /home/podman/.local/share/containers - -# chmod containers.conf and adjust storage.conf to enable Fuse storage. 
-RUN chmod 644 /etc/containers/containers.conf && \ - sed -i -e 's|^#mount_program|mount_program|g' \ - -e '/additionalimage.*/a "/var/lib/shared",' \ - -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \ - /etc/containers/storage.conf -RUN mkdir -p /var/lib/shared/overlay-images \ - /var/lib/shared/overlay-layers \ - /var/lib/shared/vfs-images \ - /var/lib/shared/vfs-layers && \ - touch /var/lib/shared/overlay-images/images.lock && \ - touch /var/lib/shared/overlay-layers/layers.lock && \ - touch /var/lib/shared/vfs-images/images.lock && \ - touch /var/lib/shared/vfs-layers/layers.lock - -ENV _CONTAINERS_USERNS_CONFIGURED="" diff --git a/contrib/podmanimage/upstream/Containerfile b/contrib/podmanimage/upstream/Containerfile deleted file mode 100644 index e31b94746f..0000000000 --- a/contrib/podmanimage/upstream/Containerfile +++ /dev/null @@ -1,68 +0,0 @@ -# upstream/Containerfile -# -# Build a Podman container image from the latest -# upstream version of Podman on GitHub. -# https://github.com/containers/podman -# This image can be used to create a secured container -# that runs safely with privileges within the container. -# The containers created by this image also come with a -# Podman development environment in /root/podman. -# -FROM registry.fedoraproject.org/fedora:latest - -# Don't include container-selinux and remove -# directories used by dnf that are just taking -# up space. The latest podman + deps. come from -# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/ -# TODO: rpm --setcaps... needed due to Fedora (base) image builds -# being (maybe still?) affected by -# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3 -RUN dnf -y update && \ - rpm --setcaps shadow-utils 2>/dev/null && \ - dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \ - dnf -y copr enable rhcontainerbot/podman-next && \ - dnf -y install podman fuse-overlayfs openssh-clients \ - --exclude container-selinux \ - --enablerepo=updates-testing && \ - dnf clean all && \ - rm -rf /var/cache /var/log/dnf* /var/log/yum.* - -RUN useradd podman; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subuid; \ -echo -e "podman:1:999\npodman:1001:64535" > /etc/subgid; - -ARG _REPO_URL="https://raw.githubusercontent.com/containers/podman/main/contrib/podmanimage/stable" -ADD $_REPO_URL/containers.conf /etc/containers/containers.conf -ADD $_REPO_URL/podman-containers.conf /home/podman/.config/containers/containers.conf - -RUN mkdir -p /home/podman/.local/share/containers && \ - chown podman:podman -R /home/podman && \ - chmod 644 /etc/containers/containers.conf - -# Copy & modify the defaults to provide reference if runtime changes needed. -# Changes here are required for running with fuse-overlay storage inside container. 
-RUN sed -e 's|^#mount_program|mount_program|g' \ - -e '/additionalimage.*/a "/var/lib/shared",' \ - -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \ - /usr/share/containers/storage.conf \ - > /etc/containers/storage.conf - -# Setup internal Podman to pass secrets/subscriptions down from host to internal container -RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf - -# Note VOLUME options must always happen after the chown call above -# RUN commands can not modify existing volumes -VOLUME /var/lib/containers -VOLUME /home/podman/.local/share/containers - -RUN mkdir -p /var/lib/shared/overlay-images \ - /var/lib/shared/overlay-layers \ - /var/lib/shared/vfs-images \ - /var/lib/shared/vfs-layers && \ - touch /var/lib/shared/overlay-images/images.lock && \ - touch /var/lib/shared/overlay-layers/layers.lock && \ - touch /var/lib/shared/vfs-images/images.lock && \ - touch /var/lib/shared/vfs-layers/layers.lock - -ENV _CONTAINERS_USERNS_CONFIGURED="" \ - BUILDAH_ISOLATION=chroot diff --git a/contrib/podmanremoteimage/Containerfile b/contrib/podmanremoteimage/Containerfile deleted file mode 100644 index 7d19bf6b08..0000000000 --- a/contrib/podmanremoteimage/Containerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM registry.access.redhat.com/ubi8/go-toolset:latest AS builder -WORKDIR /opt/app-root/src -COPY . . -RUN make podman-remote-static-linux_amd64 -RUN GOOS=windows make podman-remote -RUN GOOS=darwin make podman-remote - -FROM scratch -COPY --from=builder /opt/app-root/src/bin . -ENTRYPOINT ["/podman-remote-static-linux_amd64"] diff --git a/contrib/podmanremoteimage/README.md b/contrib/podmanremoteimage/README.md deleted file mode 100644 index ea4be3a865..0000000000 --- a/contrib/podmanremoteimage/README.md +++ /dev/null @@ -1,25 +0,0 @@ -podman-remote-images -==================== - -Overview --------- - -This directory contains the containerfile for creating a container image which consist podman-remote binary -for each platform (win/linux/mac). - -Users can copy those binaries onto the specific platforms using following instructions - -- For Windows binary -```bash -$ podman cp $(podman create --name remote-temp quay.io/containers/podman-remote-artifacts:latest):/windows/podman.exe . && podman rm remote-temp -``` - -- For Linux binary -```bash -$ podman cp $(podman create --name remote-temp quay.io/containers/podman-remote-artifacts:latest):/podman-remote-static-linux_amd64 . && podman rm remote-temp -``` - -- For Mac binary -```bash -$ podman cp $(podman create --name remote-temp quay.io/containers/podman-remote-artifacts:latest):/darwin/podman . 
&& podman rm remote-temp -``` diff --git a/contrib/validatepr/Containerfile b/contrib/validatepr/Containerfile new file mode 100644 index 0000000000..5eba4c8fbe --- /dev/null +++ b/contrib/validatepr/Containerfile @@ -0,0 +1,17 @@ +FROM registry.fedoraproject.org/fedora:latest + +WORKDIR /go/src/github.com/containers/podman + +RUN dnf install -y systemd-devel \ + libassuan-devel \ + libseccomp-devel \ + gpgme-devel \ + device-mapper-devel \ + btrfs-progs-devel \ + golang \ + make \ + man-db \ + git \ + perl-Clone \ + perl-FindBin \ + pre-commit && dnf clean all diff --git a/contrib/validatepr/validatepr.sh b/contrib/validatepr/validatepr.sh new file mode 100755 index 0000000000..ad754b409c --- /dev/null +++ b/contrib/validatepr/validatepr.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -x + +# +# This script is intended to help developers contribute to the podman project. It +# checks various pre-CI checks like building, linting, man-pages, etc. It is meant +# to be run in a specific container environment. + +build() { + err="" + + echo "Building windows" + if ! GOOS=windows CGO_ENABLED=0 go build -tags "$REMOTETAGS" -o bin/podman-remote-windows ./cmd/podman; then + err+="\n - Windows " + fi + + echo "Building darwin" + if ! GOOS=darwin CGO_ENABLED=0 go build -tags "$REMOTETAGS" -o bin/podman-remote-darwin ./cmd/podman; then + err+="\n - Darwin " + fi + + echo "Building podman binaries" + if ! make binaries; then + err+="\n - Additional Binaries " + fi + + if [ ! -z "$err" ] + then + echo -e "\033[31mFailed to build: ${err}\033[0m">&2 + exit 1 + fi +} + +validate(){ + echo "Running validation tooling" + + # golangci-lint gobbles memory. + # By default, podman machines only have 2GB memory, + # often causing the linter be killed when run on Darwin/Windows + mem=$(awk '/MemTotal/ {print $2}' /proc/meminfo) + if (( $((mem)) < 3900000 )); then + echo -e "\033[33mWarning: Your machine may not have sufficient memory (< 4 GB)to run the linter. 
\ +If the process is killed, please allocate more memory.\033[0m">&2 + fi + + make validate +} + +build +validate diff --git a/contrib/win-installer/build.ps1 b/contrib/win-installer/build.ps1 index c8741563ea..6c5189d7f9 100644 --- a/contrib/win-installer/build.ps1 +++ b/contrib/win-installer/build.ps1 @@ -99,7 +99,7 @@ if ($args.Count -lt 1 -or $args[0].Length -lt 1) { } # Pre-set to standard locations in-case build env does not refresh paths -$Env:Path="$Env:Path;C:\Program Files (x86)\WiX Toolset v3.11\bin;C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin;;C:\Program Files\Go\bin" +$Env:Path="$Env:Path;C:\Program Files (x86)\WiX Toolset v3.14\bin;C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin;;C:\Program Files\Go\bin" CheckRequirements @@ -139,10 +139,18 @@ SignItem @("artifacts/win-sshproxy.exe", $gvExists = Test-Path "artifacts/gvproxy.exe" if ($gvExists) { SignItem @("artifacts/gvproxy.exe") + Remove-Item Env:\UseGVProxy -ErrorAction SilentlyContinue } else { $env:UseGVProxy = "Skip" } +# Retaining for possible future additions +# $pExists = Test-Path "artifacts/policy.json" +# if ($pExists) { +# Remove-Item Env:\IncludePolicyJSON -ErrorAction SilentlyContinue +# } else { +# $env:IncludePolicyJSON = "Skip" +# } .\build-msi.bat $ENV:INSTVER; ExitOnError SignItem @("podman.msi") diff --git a/contrib/win-installer/burn.wxs b/contrib/win-installer/burn.wxs index ae352e6c90..6a367c5457 100644 --- a/contrib/win-installer/burn.wxs +++ b/contrib/win-installer/burn.wxs @@ -17,10 +17,13 @@ + + + @@ -35,9 +38,12 @@ + + + - + diff --git a/contrib/win-installer/podman-msihooks/check.c b/contrib/win-installer/podman-msihooks/check.c index 1a3b10e479..2925c0b8f4 100644 --- a/contrib/win-installer/podman-msihooks/check.c +++ b/contrib/win-installer/podman-msihooks/check.c @@ -2,7 +2,9 @@ #include BOOL isWSLEnabled(); +BOOL isHyperVEnabled(); LPCWSTR boolToNStr(BOOL bool); +LPCSTR szSvcNameHyperv = TEXT("vmms"); /** * CheckWSL is a custom action loaded by the Podman Windows installer @@ -28,6 +30,19 @@ LPCWSTR boolToNStr(BOOL bool); return 0; } +/** + * CheckHyperV is a custom action loaded by the Podman Windows installer + * to determine whether the system already has Hyper-V installed. + */ + + __declspec(dllexport) UINT __cdecl CheckHyperV(MSIHANDLE hInstall) { + BOOL hasHyperV = isHyperVEnabled(); + // Set a property with the HyperV state for the installer to operate on + MsiSetPropertyW(hInstall, L"HAS_HYPERVFEATURE", boolToNStr(hasHyperV)); + + return 0; +} + LPCWSTR boolToNStr(BOOL bool) { return bool ? L"1" : L"0"; } @@ -51,7 +66,7 @@ BOOL isWSLEnabled() { // CreateProcessW requires lpCommandLine to be mutable wchar_t cmd[] = L"wsl --set-default-version 2"; if (! CreateProcessW(NULL, cmd, NULL, NULL, FALSE, CREATE_NEW_CONSOLE, - NULL, NULL, &startup, &process)) { + NULL, NULL, &startup, &process)) { return FALSE; } @@ -64,3 +79,52 @@ BOOL isWSLEnabled() { return exitCode == 0; } + +BOOL isHyperVEnabled() { + /* + * Checks if the Windows service `vmms` is running to + * determine if Hyper-V is enabled. + */ + SC_HANDLE schSCManager; + SC_HANDLE schService; + SERVICE_STATUS_PROCESS ssStatus; + DWORD dwBytesNeeded; + + // Get a handle to the SCM database. + schSCManager = OpenSCManager( + NULL, // local computer + NULL, // servicesActive database + SERVICE_QUERY_STATUS); // service query access rights + + if (NULL == schSCManager) { + return FALSE; + } + + // Get a handle to the service. 
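+    // Only SERVICE_QUERY_STATUS access is requested here: this custom action
+    // merely checks whether the 'vmms' (Hyper-V Virtual Machine Management)
+    // service is running; it never starts, stops, or reconfigures it.
+    // As with the SCM handle above, any failure is treated as "Hyper-V not
+    // enabled" and the function returns FALSE.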
+ schService = OpenService( + schSCManager, + szSvcNameHyperv, + SERVICE_QUERY_STATUS); + + if (schService == NULL) { + CloseServiceHandle(schSCManager); + return FALSE; + } + + // Check the status + if (!QueryServiceStatusEx( + schService, // handle to service + SC_STATUS_PROCESS_INFO, // information level + (LPBYTE) &ssStatus, // address of structure + sizeof(SERVICE_STATUS_PROCESS), // size of structure + &dwBytesNeeded ) ) { + CloseServiceHandle(schService); + CloseServiceHandle(schSCManager); + return FALSE; + } + + CloseServiceHandle(schService); + CloseServiceHandle(schSCManager); + + return ssStatus.dwCurrentState == SERVICE_RUNNING; +} diff --git a/contrib/win-installer/podman.wxs b/contrib/win-installer/podman.wxs index 9c4fa2780e..d1590c85ef 100644 --- a/contrib/win-installer/podman.wxs +++ b/contrib/win-installer/podman.wxs @@ -12,6 +12,7 @@ + @@ -19,7 +20,83 @@ - NOT (WITH_WSL = 0) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -47,6 +124,22 @@ + + + + + + + + CREATE_MACHINE_PROVIDER_CONFIG_FILE + + + + @@ -56,16 +149,24 @@ - + + - - (NOT Installed) AND WSL_INSTALL = 1 AND HAS_WSLFEATURE = 0 - + + (NOT Installed) AND WSL_INSTALL = 1 + + + + (NOT Installed) AND HYPERV_INSTALL = 1 + + + + @@ -75,8 +176,10 @@ + + @@ -93,8 +196,13 @@ - WSL_INSTALL = 1 - (NOT Installed) AND WSL_INSTALL = 1 AND HAS_WSLFEATURE = 0 AND NOT AFTERREBOOT + + + + (NOT Installed) + AND ((WSL_INSTALL = 1) OR (HYPERV_INSTALL = 1)) + AND (NOT AFTERREBOOT) + diff --git a/contrib/win-installer/process-release.ps1 b/contrib/win-installer/process-release.ps1 index 5706558a6d..230ad7a43b 100644 --- a/contrib/win-installer/process-release.ps1 +++ b/contrib/win-installer/process-release.ps1 @@ -135,6 +135,14 @@ try { Copy-Artifact("gvproxy.exe") } + # Retaining for future additions + # $loc = Get-ChildItem -Recurse -Path . -Name policy.json + # if (!$loc) { + # Write-Host "Skipping policy.json artifact" + # } else { + # Copy-Artifact("policy.json") + # } + $docsloc = Get-ChildItem -Path . -Name docs -Recurse $loc = Get-ChildItem -Recurse -Path . 
-Name podman-for-windows.html if (!$loc) { diff --git a/contrib/win-installer/resources/podman-dialog.png b/contrib/win-installer/resources/podman-dialog.png index cf0c49d984..946bd095ad 100644 Binary files a/contrib/win-installer/resources/podman-dialog.png and b/contrib/win-installer/resources/podman-dialog.png differ diff --git a/contrib/win-installer/welcome-install-dlg.wxs b/contrib/win-installer/welcome-install-dlg.wxs index cc0f1c6433..325cd74821 100644 --- a/contrib/win-installer/welcome-install-dlg.wxs +++ b/contrib/win-installer/welcome-install-dlg.wxs @@ -1,31 +1,49 @@ - - - - - - - - - - - - - !(wix.WixUICostingPopupOptOut) OR CostingComplete = 1 - 1]]> - OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST) - OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D" - OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D" - (OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F") - - - 1 - - - - - - - NOT Installed - - - + + + + + + + + + + HIDE_PROVIDER_CHOICE + + + + + + + HIDE_PROVIDER_CHOICE + + + MACHINE_PROVIDER = "wsl" + MACHINE_PROVIDER = "hyperv" OR HIDE_PROVIDER_CHOICE + HAS_WSLFEATURE = 1 + + + MACHINE_PROVIDER = "hyperv" + MACHINE_PROVIDER = "wsl" OR HIDE_PROVIDER_CHOICE + HAS_HYPERVFEATURE = 1 + + + + !(wix.WixUICostingPopupOptOut) OR CostingComplete = 1 + 1]]> + OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST) + OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D" + OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D" + (OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F") + + + 1 + + + + + + + NOT Installed + + + diff --git a/docs/kubernetes_support.md b/docs/kubernetes_support.md index b7bc112451..4806a1e9e8 100644 --- a/docs/kubernetes_support.md +++ b/docs/kubernetes_support.md @@ -104,7 +104,7 @@ Note: **N/A** means that the option cannot be supported in a single-node Podman | volumeMounts\.name | ✅ | | volumeMounts\.mountPropagation | no | | volumeMounts\.readOnly | ✅ | -| volumeMounts\.subPath | no | +| volumeMounts\.subPath | ✅ | | volumeMounts\.subPathExpr | no | | volumeDevices\.devicePath | no | | volumeDevices\.name | no | diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index 6247f7e231..0000000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. 
- echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/make.ps1 b/docs/make.ps1 new file mode 100644 index 0000000000..27289aae7f --- /dev/null +++ b/docs/make.ps1 @@ -0,0 +1,147 @@ +function Get-Podman-Commands-List{ + param ( + [string]$podmanClient, + [string]$command + ); + if(!$podmanClient) { + $podmanClient="$PSScriptRoot\..\bin\windows\podman.exe" + } + if($command) { + $podmanHelpCommand="help $command" + Write-Host "Retrieving the list of ""podman $command"" subcommands." + } else { + $podmanHelpCommand="help" + Write-Host "Retrieving the list of ""podman"" commands." + } + + # Retrieve the list of subcommands of $command + # e.g. "podman help machine" returns the list of + # "podman machine" subcommands: info, init, etc... + $subCommands = @() + $subCommands = Invoke-Expression "$podmanClient $podmanHelpCommand" | + Select-String -Pattern "^\s*Available Commands:" -Context 0, 1000 | Out-String -Stream | + Select-String -Pattern "^\s+$" -Context 1000, 0 | Out-String -Stream | + Select-String -Pattern ">\s*Available Commands:|^>\s*$|^\s*$" -NotMatch | Out-String -Stream | + ForEach-Object { $_ -replace '^\s*(\w+)\s+.*$', '$1' } | Where-Object { $_ -ne "" } + + if ($command) { + $subCommands = $subCommands | ForEach-Object { "$command $_" } + } + + # Recursively get the list of sub-subcommands for each subcommand + foreach ($subCommand in $subCommands) { + + $subSubCommands = @() + $subSubCommands = Get-Podman-Commands-List -podmanClient "$podmanClient" -command "${subCommand}" + + if ($subSubCommands) { + $subCommands += $subSubCommands + } + } + + return $subCommands +} + +function Build-Podman-For-Windows-HTML-Page{ + $srcFolder = "$PSScriptRoot\tutorials" + $srcFile = "$srcFolder\podman-for-windows.md" + $destFolder = "$PSScriptRoot\build\remote" + $destFile = "$destFolder\podman-for-windows.html" + $cssFile = "$PSScriptRoot\standalone-styling.css" + $pandocOptions = "--ascii --from markdown-smart -c $cssFile --standalone " + + "--embed-resources --metadata title=""Podman for Windows"" " + + "-V title=" + + Write-Host -NoNewline "Generating $destFile from $srcFile..." + Push-Location $srcFolder + New-Item -ItemType Directory -Force -Path $destFolder | Out-Null + Invoke-Expression "pandoc $pandocOptions $srcFile > $destFile" + Pop-Location + Write-Host "done." 
+} + +function Build-Podman-Remote-HTML-Page{ + $markdownFolder = "$PSScriptRoot\source\markdown" + # Look for all podman-remote*.md files in the markdown folder + Get-ChildItem -Path "$markdownFolder" -Filter "podman-remote*.md" | ForEach-Object { + # Extract the command name from the file name + $command = $_.Name -replace '^podman-(.*).1.md$','$1' + # Generate the documentation HTML page + Build-Podman-Command-HTML-Page -command $command + } +} + +function Find-Podman-Command-Markdown-File{ + param ( + [string]$command + ); + # A podman command documentation can be in one of the following files + $markdownFolder = "$PSScriptRoot\source\markdown" + $srcFileMdIn = "$markdownFolder\podman-$command.1.md.in" + $srcFileMd = "$markdownFolder\podman-$command.1.md" + $linkFile = "$markdownFolder\links\podman-$command.1" + + if (Test-Path -Path $srcFileMdIn -PathType Leaf) { + return $srcFileMdIn + } elseif (Test-Path -Path $srcFileMd -PathType Leaf) { + return $srcFileMd + } elseif (Test-Path -Path $linkFile -PathType Leaf) { + # In $linkFile there is a link to a markdown file + $srcFile = Get-Content -Path $linkFile + # $srcFile is something like ".so man1/podman-attach.1" + # and the markdown file is "podman-attach.1.md" + $srcFile = $srcFile -replace ".so man1/", "" + $srcFileMdIn = "$markdownFolder\$srcFile.md.in" + $srcFileMd = "$markdownFolder\$srcFile.md" + if (Test-Path -Path "$srcFileMdIn" -PathType Leaf) { + return "$srcFileMdIn" + } elseif (Test-Path -Path $srcFileMd -PathType Leaf) { + return "$srcFileMd" + } + } + return $null +} + +function Build-Podman-Command-HTML-Page{ + param ( + [string]$command + ); + + $destFile = "$PSScriptRoot\build\remote\podman-$command.html" + $srcFile = Find-Podman-Command-Markdown-File -command $command + + if (!$srcFile) { + Write-Host "Couldn't find the documentation source file for $command. Skipping." + continue + } + + $pandocOptions = "--ascii --standalone --from markdown-smart " + + "--lua-filter=$PSScriptRoot\links-to-html.lua " + + "--lua-filter=$PSScriptRoot\use-pagetitle.lua" + + Write-Host -NoNewline "Generating $command documentation..." + Invoke-Expression "pandoc $pandocOptions -o $destFile $srcFile" | Out-Null + Write-Host "done." +} + +# Generate podman-for-windows.html +Build-Podman-For-Windows-HTML-Page + +# Generate podman-remote*.html +Build-Podman-Remote-HTML-Page + +# Get the list of podman commands on Windows +if ($args[1]) { + $commands = Get-Podman-Commands-List "-podmanClient $args[1]" +} +else { + $commands = Get-Podman-Commands-List +} + +# Generate podman commands documentation +foreach ($command in $commands) { + # Replace spaces with hyphens in the command name + # e.g. 
machine os apply becomes machine-os-apply + $command = $command -replace ' ', '-' + Build-Podman-Command-HTML-Page -command $command +} diff --git a/docs/source/Commands.rst b/docs/source/Commands.rst index 547b86e97c..6a81adfa89 100644 --- a/docs/source/Commands.rst +++ b/docs/source/Commands.rst @@ -13,23 +13,25 @@ Commands :doc:`commit ` Create new image based on the changed container -:doc:`container ` Manage Containers +:doc:`container ` Manage containers :doc:`cp ` Copy files/folders between a container and the local filesystem :doc:`create ` Create but do not start a container -:doc:`diff ` Inspect changes on container's file systems +:doc:`diff ` Display the changes to the object's file system -:doc:`events ` Show podman events +:doc:`events ` Show podman system events :doc:`exec ` Run a process in a running container :doc:`export ` Export container's filesystem contents as a tar archive -:doc:`generate ` Generated structured data +:doc:`farm ` Farm out builds to remote machines -:doc:`healthcheck ` Manage Healthcheck +:doc:`generate ` Generate structured data based on containers, pods or volumes + +:doc:`healthcheck ` Manage health checks on containers :doc:`history ` Show history of a specified image @@ -43,27 +45,27 @@ Commands :doc:`init ` Initialize one or more containers -:doc:`inspect ` Display the configuration of a container or image +:doc:`inspect ` Display the configuration of object denoted by ID :doc:`kill ` Kill one or more running containers with a specific signal -:doc:`kube ` Play a pod +:doc:`kube ` Play containers, pods or volumes from a structured file -:doc:`load ` Load an image from container archive +:doc:`load ` Load image(s) from a tar archive :doc:`login ` Log in to a container registry :doc:`logout ` Log out of a container registry -:doc:`logs ` Fetch the logs of a container +:doc:`logs ` Fetch the logs of one or more containers -:doc:`machine ` Manage podman's virtual machine +:doc:`machine ` Manage a virtual machine -:doc:`manifest ` Create and manipulate manifest lists and image indexes +:doc:`manifest ` Manipulate manifest lists and image indexes :doc:`mount ` Mount a working container's root filesystem -:doc:`network ` Manage Networks +:doc:`network ` Manage networks :doc:`pause ` Pause all the processes in one or more containers @@ -87,11 +89,11 @@ Commands :doc:`run ` Run a command in a new container -:doc:`save ` Save image to an archive +:doc:`save ` Save image(s) to an archive :doc:`search ` Search registry for image -:doc:`secret ` Manage podman secrets +:doc:`secret ` Manage secrets :doc:`start ` Start one or more containers @@ -111,9 +113,11 @@ Commands :doc:`unshare ` Run a command in a modified user namespace -:doc:`untag ` Remove one or more names from a locally-stored image +:doc:`untag ` Remove a name from a local image + +:doc:`update ` Update an existing container -:doc:`version ` Display the Podman Version Information +:doc:`version ` Display the Podman version information :doc:`volume ` Manage volumes diff --git a/docs/source/Reference.rst b/docs/source/Reference.rst index de5800c116..ae80d66de8 100644 --- a/docs/source/Reference.rst +++ b/docs/source/Reference.rst @@ -7,6 +7,12 @@ Show the API documentation for version: * `latest (main branch) <_static/api.html>`_ +* `version 5.0 <_static/api.html?version=v5.0>`_ + +* `version 4.9 <_static/api.html?version=v4.9>`_ + +* `version 4.8 <_static/api.html?version=v4.8>`_ + * `version 4.7 <_static/api.html?version=v4.7>`_ * `version 4.6 <_static/api.html?version=v4.6>`_ diff --git 
a/docs/source/Tutorials.rst b/docs/source/Tutorials.rst index 1c8e946db8..af851b10c7 100644 --- a/docs/source/Tutorials.rst +++ b/docs/source/Tutorials.rst @@ -2,7 +2,7 @@ Tutorials ========= -Here are a number of useful tutorials to get you up and running with Podman. If you are familiar with the Docker `Container Engine`_ the command in Podman_ should be quite familiar. If you are brand new to containers, take a look at our `Introduction`. +Here are a number of useful tutorials to get you up and running with Podman. If you are familiar with the Docker `Container Engine`_ the commands in Podman_ should be quite familiar. If you are brand new to containers, take a look at our `Introduction`. * `Basic Setup and Use of Podman `_: Learn how to set up Podman and perform some basic commands with the utility. * `Basic Setup and Use of Podman in a Rootless environment `_: The steps required to set up rootless Podman are enumerated. diff --git a/docs/source/markdown/options/annotation.manifest.md b/docs/source/markdown/options/annotation.manifest.md index 36914df424..472fc40b40 100644 --- a/docs/source/markdown/options/annotation.manifest.md +++ b/docs/source/markdown/options/annotation.manifest.md @@ -4,4 +4,4 @@ ####> are applicable to all of those. #### **--annotation**=*annotation=value* -Set an annotation on the entry for the image. +Set an annotation on the entry for the specified image or artifact. diff --git a/docs/source/markdown/options/expose.md b/docs/source/markdown/options/expose.md index 6615a350ad..745e25226c 100644 --- a/docs/source/markdown/options/expose.md +++ b/docs/source/markdown/options/expose.md @@ -2,7 +2,11 @@ ####> podman create, run ####> If file is edited, make sure the changes ####> are applicable to all of those. -#### **--expose**=*port* +#### **--expose**=*port[/protocol]* -Expose a port, or a range of ports (e.g. **--expose=3300-3310**) to set up port redirection -on the host system. +Expose a port or a range of ports (e.g. **--expose=3300-3310**). +The protocol can be `tcp`, `udp` or `sctp` and if not given `tcp` is assumed. +This option matches the EXPOSE instruction for image builds and has no effect on +the actual networking rules unless **-P/--publish-all** is used to forward to all +exposed ports from random host ports. To forward specific ports from the host +into the container use the **-p/--publish** option instead. diff --git a/docs/source/markdown/options/features.md b/docs/source/markdown/options/features.md index 05540558f4..0ee84217a1 100644 --- a/docs/source/markdown/options/features.md +++ b/docs/source/markdown/options/features.md @@ -2,7 +2,7 @@ ####> podman manifest add, manifest annotate ####> If file is edited, make sure the changes ####> are applicable to all of those. -#### **--features** +#### **--features**=*feature* Specify the features list which the list or index records as requirements for the image. This option is rarely used. diff --git a/docs/source/markdown/options/log-driver.md b/docs/source/markdown/options/log-driver.md index 754a97c022..d99f229c5e 100644 --- a/docs/source/markdown/options/log-driver.md +++ b/docs/source/markdown/options/log-driver.md @@ -4,7 +4,7 @@ ####> are applicable to all of those. #### **--log-driver**=*driver* -Logging driver for the container. Currently available options are **k8s-file**, **journald**, **none** and **passthrough**, with **json-file** aliased to **k8s-file** for scripting compatibility. (Default **journald**). +Logging driver for the container. 
Currently available options are **k8s-file**, **journald**, **none**, **passthrough** and **passthrough-tty**, with **json-file** aliased to **k8s-file** for scripting compatibility. (Default **journald**). The podman info command below displays the default log-driver for the system. ``` @@ -14,3 +14,5 @@ journald The **passthrough** driver passes down the standard streams (stdin, stdout, stderr) to the container. It is not allowed with the remote Podman client, including Mac and Windows (excluding WSL2) machines, and on a tty, since it is vulnerable to attacks via TIOCSTI. + +The **passthrough-tty** driver is the same as **passthrough** except that it also allows it to be used on a TTY if the user really wants it. diff --git a/docs/source/markdown/options/mount.md b/docs/source/markdown/options/mount.md index 9a14b39fd1..7114d74952 100644 --- a/docs/source/markdown/options/mount.md +++ b/docs/source/markdown/options/mount.md @@ -41,6 +41,8 @@ Options specific to type=**image**: - *rw*, *readwrite*: *true* or *false* (default if unspecified: *false*). +- *subpath*: Mount only a specific path within the image, instead of the whole image. + Options specific to **bind** and **glob**: - *ro*, *readonly*: *true* or *false* (default if unspecified: *false*). diff --git a/docs/source/markdown/options/network.image.md b/docs/source/markdown/options/network.image.md index 9184c784fa..f03a38d9ca 100644 --- a/docs/source/markdown/options/network.image.md +++ b/docs/source/markdown/options/network.image.md @@ -15,7 +15,7 @@ considered insecure. - **ns:**_path_: path to a network namespace to join. - **private**: create a new namespace for the container (default) - **\**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users. -- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf: +- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf: - **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false. - **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`). - **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`). @@ -26,7 +26,7 @@ considered insecure. - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to. - **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking stack. \ - This is only supported in rootless mode. \ + This is the default for rootless containers and only supported in rootless mode. \ By default, IPv4 and IPv6 addresses and routes, as well as the pod interface name, are copied from the host. If port forwarding isn't configured, ports are forwarded dynamically as services are bound on either side (init diff --git a/docs/source/markdown/options/network.md b/docs/source/markdown/options/network.md index 20cd0dbcfa..2e67cc0df6 100644 --- a/docs/source/markdown/options/network.md +++ b/docs/source/markdown/options/network.md @@ -9,38 +9,44 @@ Set the network mode for the <>. 
Valid _mode_ values are: - **bridge[:OPTIONS,...]**: Create a network stack on the default bridge. This is the default for rootful containers. It is possible to specify these additional options: - - **alias=name**: Add network-scoped alias for the container. - - **ip=IPv4**: Specify a static ipv4 address for this container. - - **ip=IPv6**: Specify a static ipv6 address for this container. - - **mac=MAC**: Specify a static mac address for this container. - - **interface_name**: Specify a name for the created network interface inside the container. + - **alias=**_name_: Add network-scoped alias for the container. + - **ip=**_IPv4_: Specify a static IPv4 address for this container. + - **ip6=**_IPv6_: Specify a static IPv6 address for this container. + - **mac=**_MAC_: Specify a static MAC address for this container. + - **interface_name=**_name_: Specify a name for the created network interface inside the container. - For example, to set a static ipv4 address and a static mac address, use `--network bridge:ip=10.88.0.10,mac=44:33:22:11:00:99`. - -- \[:OPTIONS,...]: Connect to a user-defined network; this is the network name or ID from a network created by **[podman network create](podman-network-create.1.md)**. Using the network name implies the bridge network mode. It is possible to specify the same options described under the bridge mode above. Use the **--network** option multiple times to specify additional networks. + For example, to set a static ipv4 address and a static mac address, use `--network bridge:ip=10.88.0.10,mac=44:33:22:11:00:99`. +- _\_**[:OPTIONS,...]**: Connect to a user-defined network; this is the network name or ID from a network created by **[podman network create](podman-network-create.1.md)**. It is possible to specify the same options described under the bridge mode above. Use the **--network** option multiple times to specify additional networks. \ For backwards compatibility it is also possible to specify comma-separated networks on the first **--network** argument, however this prevents you from using the options described under the bridge section above. + - **none**: Create a network namespace for the container but do not configure network interfaces for it, thus the container has no network connectivity. + - **container:**_id_: Reuse another container's network stack. + - **host**: Do not create a network namespace, the container uses the host's network. Note: The host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + - **ns:**_path_: Path to a network namespace to join. + - **private**: Create a new namespace for the container. This uses the **bridge** mode for rootful containers and **slirp4netns** for rootless ones. -- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf: + +- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf: + - **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false. - - **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`). 
- - **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`). + - **mtu=**_MTU_: Specify the MTU to use for this network. (Default is `65520`). + - **cidr=**_CIDR_: Specify ip range to use for this network. (Default is `10.0.2.0/24`). - **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`). - - **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only). - - **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to. - - **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only). - - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to. - - **port_handler=rootlesskit**: Use rootlesskit for port forwarding. Default. + - **outbound_addr=**_INTERFACE_: Specify the outbound interface slirp binds to (ipv4 traffic only). + - **outbound_addr=**_IPv4_: Specify the outbound ipv4 address slirp binds to. + - **outbound_addr6=**_INTERFACE_: Specify the outbound interface slirp binds to (ipv6 traffic only). + - **outbound_addr6=**_IPv6_: Specify the outbound ipv6 address slirp binds to. + - **port_handler=rootlesskit**: Use rootlesskit for port forwarding. Default. \ Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container network namespace, usually `10.0.2.100`. If the application requires the real source IP address, e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for rootless containers when connected to user-defined networks. - **port_handler=slirp4netns**: Use the slirp4netns port forwarding, it is slower than rootlesskit but preserves the correct source IP address. This port handler cannot be used for user-defined networks. - **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking stack. \ - This is only supported in rootless mode. \ + This is the default for rootless containers and only supported in rootless mode. \ By default, IPv4 and IPv6 addresses and routes, as well as the pod interface name, are copied from the host. If port forwarding isn't configured, ports are forwarded dynamically as services are bound on either side (init diff --git a/docs/source/markdown/options/os-version.md b/docs/source/markdown/options/os-version.md index b6d83544d0..1121ae957b 100644 --- a/docs/source/markdown/options/os-version.md +++ b/docs/source/markdown/options/os-version.md @@ -2,7 +2,7 @@ ####> podman manifest add, manifest annotate ####> If file is edited, make sure the changes ####> are applicable to all of those. -#### **--os-version** +#### **--os-version**=*version* Specify the OS version which the list or index records as a requirement for the image. This option is rarely used. diff --git a/docs/source/markdown/options/pull.image.md b/docs/source/markdown/options/pull.image.md index 0b0a6ccfda..112e612210 100644 --- a/docs/source/markdown/options/pull.image.md +++ b/docs/source/markdown/options/pull.image.md @@ -4,9 +4,9 @@ ####> are applicable to all of those. #### **--pull**=*policy* -Pull image policy. The default is **always**. +Pull image policy. The default is **missing**. -- **always**, **true**: Always pull the image and throw an error if the pull fails. +- **always**: Always pull the image and throw an error if the pull fails. - **missing**: Only pull the image when it does not exist in the local containers storage. Throw an error if no image is found and the pull fails. 
-- **never**, **false**: Never pull the image but use the one from the local containers storage. Throw an error when no image is found. +- **never**: Never pull the image but use the one from the local containers storage. Throw an error when no image is found. - **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found. diff --git a/docs/source/markdown/options/restart.md b/docs/source/markdown/options/restart.md index 6c84cd5ecd..74e4e7814f 100644 --- a/docs/source/markdown/options/restart.md +++ b/docs/source/markdown/options/restart.md @@ -1,5 +1,5 @@ ####> This option file is used in: -####> podman create, pod clone, pod create, run +####> podman create, pod clone, pod create, run, update ####> If file is edited, make sure the changes ####> are applicable to all of those. #### **--restart**=*policy* diff --git a/docs/source/markdown/options/retry-delay.md b/docs/source/markdown/options/retry-delay.md index bb20a06027..005f471e4a 100644 --- a/docs/source/markdown/options/retry-delay.md +++ b/docs/source/markdown/options/retry-delay.md @@ -1,7 +1,8 @@ ####> This option file is used in: -####> podman build, farm build, pull +####> podman build, create, farm build, pull, push, run ####> If file is edited, make sure the changes ####> are applicable to all of those. #### **--retry-delay**=*duration* -Duration of delay between retry attempts in case of failure when performing pull of images from registry. Default is **2s**. +Duration of delay between retry attempts when pulling or pushing images between +the registry and local storage in case of failure. The default is to start at two seconds and then exponentially back off. When this option is set, the specified delay is used between all attempts and no exponential back off occurs. diff --git a/docs/source/markdown/options/retry.md b/docs/source/markdown/options/retry.md index e3c72b65d8..0abca43aa0 100644 --- a/docs/source/markdown/options/retry.md +++ b/docs/source/markdown/options/retry.md @@ -1,8 +1,8 @@ ####> This option file is used in: -####> podman build, farm build, pull +####> podman build, create, farm build, pull, push, run ####> If file is edited, make sure the changes ####> are applicable to all of those. #### **--retry**=*attempts* -Number of times to retry in case of failure when performing pull of -images from registry. Default is **3**. +Number of times to retry pulling or pushing images between the registry and +local storage in case of failure. Default is **3**. diff --git a/docs/source/markdown/podman-build.1.md.in b/docs/source/markdown/podman-build.1.md.in index 67bafb5bde..102c0d02b1 100644 --- a/docs/source/markdown/podman-build.1.md.in +++ b/docs/source/markdown/podman-build.1.md.in @@ -313,8 +313,6 @@ the help of emulation provided by packages like `qemu-user-static`. @@option pull.image -Pull image policy. The default is **missing**. - @@option quiet @@option retry @@ -480,59 +478,107 @@ using the architecture variant of the build host. ### Build an image using local Containerfiles +Build image using Containerfile with content from current directory: ``` $ podman build . +``` +Build image using specified Containerfile with content from current directory: ``` $ podman build -f Containerfile.simple . +``` +Build image using Containerfile from stdin with content from current directory: ``` $ cat $HOME/Containerfile | podman build -f - .
+``` +Build image using multiple Containerfiles with content from current directory: +``` $ podman build -f Containerfile.simple -f Containerfile.notsosimple . +``` +Build image with specified Containerfile with content from $HOME directory. Note `cpp` is applied to Containerfile.in before processing as Containerfile: +``` $ podman build -f Containerfile.in $HOME +``` +Build image with the specified tag with Containerfile and content from current directory: +``` $ podman build -t imageName . +``` -$ podman build --tls-verify=true -t imageName -f Containerfile.simple . - +Build image ignoring registry verification for any images pulled via the Containerfile: +``` $ podman build --tls-verify=false -t imageName . +``` +Build image with the specified logging format: +``` $ podman build --runtime-flag log-format=json . +``` +Build image using debug mode for logging: +``` $ podman build --runtime-flag debug . +``` +Build image using specified registry attributes when pulling images from the selected Containerfile: +``` $ podman build --authfile /tmp/auths/myauths.json --cert-dir $HOME/auth --tls-verify=true --creds=username:password -t imageName -f Containerfile.simple . +``` +Build image using specified resource controls when running containers during the build: +``` $ podman build --memory 40m --cpu-period 10000 --cpu-quota 50000 --ulimit nofile=1024:1028 -t imageName . +``` +Build image using specified SELinux labels and cgroup config running containers during the build: +``` $ podman build --security-opt label=level:s0:c100,c200 --cgroup-parent /path/to/cgroup/parent -t imageName . +``` +Build image with read-only and SELinux relabeled volume mounted from the host into running containers during the build: +``` $ podman build --volume /home/test:/myvol:ro,Z -t imageName . +``` +Build image with overlay volume mounted from the host into running containers during the build: +``` $ podman build -v /var/lib/yum:/var/lib/yum:O -t imageName . +``` -$ podman build --layers -t imageName . - -$ podman build --no-cache -t imageName . - +Build image using layers and then removing intermediate containers even if the build fails. +``` $ podman build --layers --force-rm -t imageName . +``` +Build image ignoring cache and not removing intermediate containers even if the build succeeds: +``` $ podman build --no-cache --rm=false -t imageName . +``` +Build image using the specified network when running containers during the build: +``` $ podman build --network mynet . ``` ### Building a multi-architecture image using the --manifest option (requires emulation software) +Build image using the specified architectures and link to a single manifest on successful completion: ``` $ podman build --arch arm --manifest myimage /tmp/mysrc - $ podman build --arch amd64 --manifest myimage /tmp/mysrc - $ podman build --arch s390x --manifest myimage /tmp/mysrc +``` +Similarly build using a single command +``` $ podman build --platform linux/s390x,linux/ppc64le,linux/amd64 --manifest myimage /tmp/mysrc +``` +Build image using multiple specified architectures and link to single manifest on successful completion: +``` $ podman build --platform linux/arm64 --platform linux/amd64 --manifest myimage /tmp/mysrc ``` @@ -548,9 +594,7 @@ context. #### Building an image using a URL to a Containerfile - Podman downloads the Containerfile to a temporary location and then use -it as the build context. 
- +Build image from Containerfile downloaded into temporary location used as the build context: ``` $ podman build https://10.10.10.1/podman/Containerfile ``` @@ -558,9 +602,10 @@ $ podman build https://10.10.10.1/podman/Containerfile #### Building an image using a Git repository Podman clones the specified GitHub repository to a temporary location and -use it as the context. The Containerfile at the root of the repository is used +uses it as the context. The Containerfile at the root of the repository is used and it only works if the GitHub repository is a dedicated repository. +Build image from specified git repository downloaded into temporary location used as the build context: ``` $ podman build -t hello https://github.com/containers/PodmanHello.git $ podman run hello @@ -570,7 +615,7 @@ $ podman run hello #### Building an image using a URL to an archive - Podman fetches the archive file, decompress it, and use its contents as the + Podman fetches the archive file, decompresses it, and uses its contents as the build context. The Containerfile at the root of the archive and the rest of the archive are used as the context of the build. Passing the `-f PATH/Containerfile` option as well tells the system to look for that file diff --git a/docs/source/markdown/podman-commit.1.md b/docs/source/markdown/podman-commit.1.md index ba66a745e2..8fa1c9bb63 100644 --- a/docs/source/markdown/podman-commit.1.md +++ b/docs/source/markdown/podman-commit.1.md @@ -78,7 +78,7 @@ Squash newly built layers into a single new layer.\ The default is **false**. ## EXAMPLES -Create image from container with entrypoint and label +Create image from container with entrypoint and label: ``` $ podman commit --change CMD=/bin/bash --change ENTRYPOINT=/bin/sh --change "LABEL blue=image" reverent_golick image-committed Getting image source signatures @@ -91,32 +91,32 @@ Storing signatures e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ``` -Create image from container with commit message +Create image from container with commit message: ``` $ podman commit -q --message "committing container to image" reverent_golick image-committed e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ``` -Create image from container with author +Create image from container with author: ``` $ podman commit -q --author "firstName lastName" reverent_golick image-committed e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ``` -Pause a running container while creating the image +Pause running container while creating image: ``` $ podman commit -q --pause=true containerID image-committed e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ``` -Create an image from a container with a default image tag +Create image from container with default image tag: ``` $ podman commit containerID e3ce4d93051ceea088d1c242624d659be32cf1667ef62f1d16d6b60193e2c7a8 ``` -Create an image from container with default required capabilities are SETUID and SETGID +Create image from container with default required capabilities: ``` $ podman commit -q --change LABEL=io.containers.capabilities=setuid,setgid epic_nobel privimage 400d31a3f36dca751435e80a0e16da4859beb51ff84670ce6bdc5edb30b94066 diff --git a/docs/source/markdown/podman-container-clone.1.md.in b/docs/source/markdown/podman-container-clone.1.md.in index 43ae1a65f6..c9098bcb3b 100644 --- a/docs/source/markdown/podman-container-clone.1.md.in +++ b/docs/source/markdown/podman-container-clone.1.md.in @@ -92,21 +92,26 @@ When set to true, this flag runs the 
newly created container after the clone process has completed, this specifies a detached running mode. ## EXAMPLES + +Clone specified container into a new container: ``` # podman container clone d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7 6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584 ``` +Clone specified container into a newly named container: ``` # podman container clone --name=clone d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7 6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584 ``` +Replace specified container with selected resource constraints into a new container, removing original container: ``` # podman container clone --destroy --cpus=5 d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7 6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584 ``` +Clone specified container giving a new name and then replacing the image of the original container with the specified image name: ``` # podman container clone 2d4d4fca7219b4437e0d74fcdc272c4f031426a6eacd207372691207079551de new_name fedora Resolved "fedora" as an alias (/etc/containers/registries.conf.d/shortnames.conf) @@ -118,6 +123,7 @@ Writing manifest to image destination Storing signatures 5a9b7851013d326aa4ac4565726765901b3ecc01fcbc0f237bc7fd95588a24f9 ``` + ## SEE ALSO **[podman-create(1)](podman-create.1.md)**, **[cgroups(7)](https://man7.org/linux/man-pages/man7/cgroups.7.html)** diff --git a/docs/source/markdown/podman-container-prune.1.md b/docs/source/markdown/podman-container-prune.1.md index 04ea4da01c..703e3e02a0 100644 --- a/docs/source/markdown/podman-container-prune.1.md +++ b/docs/source/markdown/podman-container-prune.1.md @@ -38,7 +38,7 @@ Print usage statement.\ The default is **false**. ## EXAMPLES -Remove all stopped containers from local storage +Remove all stopped containers from local storage: ``` $ podman container prune WARNING! This will remove all stopped containers. @@ -51,7 +51,7 @@ fff1c5b6c3631746055ec40598ce8ecaa4b82aef122f9e3a85b03b55c0d06c23 602d343cd47e7cb3dfc808282a9900a3e4555747787ec6723bb68cedab8384d5 ``` -Remove all stopped containers from local storage without confirmation. +Remove all stopped containers from local storage without confirmation: ``` $ podman container prune -f 878392adf2e6c5c9bb1fc19b69d37d2e98c8abf9d539c0bce4b15b46bbcce471 @@ -62,7 +62,7 @@ fff1c5b6c3631746055ec40598ce8ecaa4b82aef122f9e3a85b03b55c0d06c23 602d343cd47e7cb3dfc808282a9900a3e4555747787ec6723bb68cedab8384d5 ``` -Remove all stopped containers from local storage created before the last 10 minutes +Remove all stopped containers from local storage created before the last 10 minutes: ``` $ podman container prune --filter until="10m" WARNING! This will remove all stopped containers. diff --git a/docs/source/markdown/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md index eaf996593c..14b81296a4 100644 --- a/docs/source/markdown/podman-cp.1.md +++ b/docs/source/markdown/podman-cp.1.md @@ -101,35 +101,35 @@ the cp command. ## EXAMPLES -- Copy a file from host to a container. - ``` - podman cp /myapp/app.conf containerID:/myapp/app.conf - ``` - -- Copy a file from a container to a directory on another container. - ``` - podman cp containerID1:/myfile.txt containerID2:/tmp - ``` - -- Copy a directory on a container to a directory on the host. - ``` - podman cp containerID:/myapp/ /myapp/ - ``` - -- Copy the contents of a directory on a container to a directory on the host. - ``` - podman cp containerID:/home/myuser/. 
/home/myuser/ - ``` - -- Copy a directory on a container into a directory on another. - ``` - podman cp containerA:/myapp containerB:/newapp - ``` - -- Stream a tar archive from `STDIN` to a container. - ``` - podman cp - containerID:/myfiles.tar.gz < myfiles.tar.gz - ``` +Copy a file from the host to a container: +``` +podman cp /myapp/app.conf containerID:/myapp/app.conf +``` + +Copy a file from a container to a directory on another container: +``` +podman cp containerID1:/myfile.txt containerID2:/tmp +``` + +Copy a directory on a container to a directory on the host: +``` +podman cp containerID:/myapp/ /myapp/ +``` + +Copy the contents of a directory on a container to a directory on the host: +``` +podman cp containerID:/home/myuser/. /home/myuser/ +``` + +Copy a directory on a container into a directory on another: +``` +podman cp containerA:/myapp containerB:/newapp +``` + +Stream a tar archive from `STDIN` to a container: +``` +podman cp - containerID:/myfiles.tar.gz < myfiles.tar.gz +``` ## SEE ALSO **[podman(1)](podman.1.md)**, **[podman-mount(1)](podman-mount.1.md)**, **[podman-unmount(1)](podman-unmount.1.md)** diff --git a/docs/source/markdown/podman-create.1.md.in b/docs/source/markdown/podman-create.1.md.in index 7c84072926..486eca3ddf 100644 --- a/docs/source/markdown/podman-create.1.md.in +++ b/docs/source/markdown/podman-create.1.md.in @@ -318,6 +318,10 @@ Suppress output information when pulling images @@option restart +@@option retry + +@@option retry-delay + #### **--rm** Automatically remove the container and any anonymous unnamed volume associated with @@ -387,59 +391,47 @@ Use the **--group-add keep-groups** option to pass the user's supplementary grou ## EXAMPLES -### Create a container using a local image - +Create a container using a local image: ``` $ podman create alpine ls ``` -### Create a container using a local image and annotate it - +Create a container using a local image and annotate it: ``` $ podman create --annotation HELLO=WORLD alpine ls ``` -### Create a container using a local image, allocating a pseudo-TTY, keeping stdin open and name it myctr - +Create a container using a local image, allocating a pseudo-TTY, keeping stdin open and name it myctr: ``` podman create -t -i --name myctr alpine ls ``` -### Set UID/GID mapping in a new user namespace - Running a container in a new user namespace requires a mapping of -the UIDs and GIDs from the host. - +the UIDs and GIDs from the host: ``` $ podman create --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello ``` -### Setting automatic user namespace separated containers - +Setting automatic user-namespace separated containers: ``` # podman create --userns=auto:size=65536 ubi8-init ``` -### Configure timezone in a container - +Configure the timezone in a container: ``` $ podman create --tz=local alpine date $ podman create --tz=Asia/Shanghai alpine date $ podman create --tz=US/Eastern alpine date ``` -### Adding dependency containers - -Podman makes sure the first container, container1, is running before the second container (container2) is started. - +Ensure the first container (container1) is running before the second container (container2) is started: ``` $ podman create --name container1 -t -i fedora bash $ podman create --name container2 --requires container1 -t -i fedora bash $ podman start --attach container2 ``` -Multiple containers can be required. 
- +Create a container which requires multiple containers: ``` $ podman create --name container1 -t -i fedora bash $ podman create --name container2 -t -i fedora bash @@ -447,32 +439,27 @@ $ podman create --name container3 --requires container1,container2 -t -i fedora $ podman start --attach container3 ``` -### Exposing shared libraries inside of container as read-only using a glob - +Expose shared libraries inside of container as read-only using a glob: ``` $ podman create --mount type=glob,src=/usr/lib64/libnvidia\*,ro -i -t fedora /bin/bash ``` -### Configure keep supplemental groups for access to volume - +Create a container allowing supplemental groups to have access to the volume: ``` $ podman create -v /var/lib/design:/var/lib/design --group-add keep-groups ubi8 ``` -### Configure execution domain for containers using personality flag - +Configure execution domain for containers using the personality option: ``` $ podman create --name container1 --personality=LINUX32 fedora bash ``` -### Create a container with external rootfs mounted as an overlay - +Create a container with external rootfs mounted as an overlay: ``` $ podman create --name container1 --rootfs /path/to/rootfs:O bash ``` -### Create a container connected to two networks (called net1 and net2) with a static ip - +Create a container connected to two networks (called net1 and net2) with a static ip: ``` $ podman create --network net1:ip=10.89.1.5 --network net2:ip=10.89.10.10 alpine ip addr ``` diff --git a/docs/source/markdown/podman-diff.1.md.in b/docs/source/markdown/podman-diff.1.md.in index 70de724bce..694d488a86 100644 --- a/docs/source/markdown/podman-diff.1.md.in +++ b/docs/source/markdown/podman-diff.1.md.in @@ -27,11 +27,13 @@ Alter the output into a different format. The only valid format for **podman di ## EXAMPLE +Show container-modified files versus the container's image: ``` $ podman diff container1 A /myscript.sh ``` +Show container-modified files versus the container's image in JSON format: ``` $ podman diff --format json myimage { @@ -46,6 +48,7 @@ $ podman diff --format json myimage } ``` +Show the difference between the specified container and the image: ``` $ podman diff container1 image1 A /test diff --git a/docs/source/markdown/podman-events.1.md b/docs/source/markdown/podman-events.1.md index 25e203cde9..24cbacdc19 100644 --- a/docs/source/markdown/podman-events.1.md +++ b/docs/source/markdown/podman-events.1.md @@ -47,6 +47,7 @@ The *container* event type reports the follow statuses: * sync * unmount * unpause + * update The *pod* event type reports the follow statuses: * create @@ -61,6 +62,7 @@ The *image* event type reports the following statuses: * loadFromArchive, * mount * pull + * pull-error * push * remove * save @@ -104,21 +106,22 @@ In the case where an ID is used, the ID may be in its full or shortened form. T Format the output to JSON Lines or using the given Go template. -| **Placeholder** | **Description** | -|-------------------------|-----------------------------------------------| -| .Attributes ... | created_at, _by, labels, and more (map[]) | -| .ContainerExitCode | Exit code (int) | -| .ContainerInspectData | Payload of the container's inspect | -| .HealthStatus | Health Status (string) | -| .ID | Container ID (full 64-bit SHA) | -| .Image | Name of image being run (string) | -| .Name | Container name (string) | -| .Network | Name of network being used (string) | -| .PodID | ID of pod associated with container, if any | -| .Status | Event status (e.g., create, start, died, ...) 
| -| .Time ... | Event timestamp (string) | -| .ToHumanReadable *bool* | If true, truncates CID in output | -| .Type | Event type (e.g., image, container, pod, ...) | +| **Placeholder** | **Description** | +| --------------------- | -------------------------------------------------------------------- | +| .Attributes ... | created_at, _by, labels, and more (map[]) | +| .ContainerExitCode | Exit code (int) | +| .ContainerInspectData | Payload of the container's inspect | +| .Error | Error message in case the event status is an error (e.g. pull-error) | +| .HealthStatus | Health Status (string) | +| .ID | Container ID (full 64-bit SHA) | +| .Image | Name of image being run (string) | +| .Name | Container name (string) | +| .Network | Name of network being used (string) | +| .PodID | ID of pod associated with container, if any | +| .Status | Event status (e.g., create, start, died, ...) | +| .Time | Event timestamp (string) | +| .TimeNano | Event timestamp with nanosecond precision (int64) | +| .Type | Event type (e.g., image, container, pod, ...) | #### **--help** @@ -164,7 +167,7 @@ The journald events-backend of Podman uses the following journald identifiers. ## EXAMPLES -Showing Podman events +Show Podman events: ``` $ podman events 2019-03-02 10:33:42.312377447 -0600 CST container create 34503c192940 (image=docker.io/library/alpine:latest, name=friendly_allen) @@ -174,7 +177,7 @@ $ podman events 2019-03-02 10:33:51.047104966 -0600 CST container cleanup 34503c192940 (image=docker.io/library/alpine:latest, name=friendly_allen) ``` -Show only Podman create events +Show only Podman container create events: ``` $ podman events -f event=create 2019-03-02 10:36:01.375685062 -0600 CST container create 20dc581f6fbf (image=docker.io/library/alpine:latest, name=sharp_morse) @@ -183,7 +186,7 @@ $ podman events -f event=create 2019-03-02 10:36:29.978806894 -0600 CST container create d81e30f1310f (image=docker.io/library/busybox:latest, name=musing_newton) ``` -Show only Podman pod create events +Show only Podman pod create events: ``` $ podman events --filter event=create --filter type=pod 2019-03-02 10:44:29.601746633 -0600 CST pod create 1df5ebca7b44 (image=, name=confident_hawking) @@ -200,7 +203,7 @@ $ sudo podman events --since 5m 2019-03-02 10:44:42.374637304 -0600 CST pod create ca731231718e (image=, name=webapp) ``` -Show Podman events in JSON Lines format +Show Podman events in JSON Lines format: ``` $ podman events --format json {"ID":"683b0909d556a9c02fa8cd2b61c3531a965db42158627622d1a67b391964d519","Image":"localhost/myshdemo:latest","Name":"agitated_diffie","Status":"cleanup","Time":"2019-04-27T22:47:00.849932843-04:00","Type":"container"} diff --git a/docs/source/markdown/podman-export.1.md b/docs/source/markdown/podman-export.1.md index d024d0256a..fdf87c6d3c 100644 --- a/docs/source/markdown/podman-export.1.md +++ b/docs/source/markdown/podman-export.1.md @@ -34,9 +34,13 @@ Write to a file, default is STDOUT ## EXAMPLES +Export container into specified tar ball: ``` $ podman export -o redis-container.tar 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 +``` +Export container to stdout: +``` $ podman export 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 > redis-container.tar ``` diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md index 3fc491246d..cd6c637bd7 100644 --- a/docs/source/markdown/podman-generate-systemd.1.md +++ b/docs/source/markdown/podman-generate-systemd.1.md @@ -9,7 +9,8 @@ 
podman\-generate\-systemd - [DEPRECATED] Generate systemd unit file(s) for a con ## DESCRIPTION DEPRECATED: Note: **podman generate systemd** is deprecated. We recommend using [Quadlet](podman-systemd.unit.5.md) -files when running Podman containers or pods under systemd. +files when running Podman containers or pods under systemd. There are no plans to remove the command. +It will receive urgent bug fixes but no new features. **podman generate systemd** creates a systemd unit file that can be used to control a container or pod. By default, the command prints the content of the unit files to stdout. diff --git a/docs/source/markdown/podman-history.1.md b/docs/source/markdown/podman-history.1.md index ef40f9783f..b9d567151e 100644 --- a/docs/source/markdown/podman-history.1.md +++ b/docs/source/markdown/podman-history.1.md @@ -54,6 +54,7 @@ Print the numeric IDs only (default *false*). ## EXAMPLES +Show the history of the specified image: ``` $ podman history debian ID CREATED CREATED BY SIZE COMMENT @@ -61,6 +62,7 @@ b676ca55e4f2c 9 weeks ago /bin/sh -c #(nop) CMD ["bash"] 0 9 weeks ago /bin/sh -c #(nop) ADD file:ebba725fb97cea4... 45.14 MB ``` +Show the history of the specified image without truncating content and using raw data: ``` $ podman history --no-trunc=true --human=false debian ID CREATED CREATED BY SIZE COMMENT @@ -68,12 +70,14 @@ b676ca55e4f2c 2017-07-24T16:52:55Z /bin/sh -c #(nop) CMD ["bash"] 2017-07-24T16:52:54Z /bin/sh -c #(nop) ADD file:ebba725fb97cea4... 45142935 ``` +Show the formatted history of the specified image: ``` $ podman history --format "{{.ID}} {{.Created}}" debian b676ca55e4f2c 9 weeks ago 9 weeks ago ``` +Show the history in JSON format for the specified image: ``` $ podman history --format json debian [ diff --git a/docs/source/markdown/podman-images.1.md.in b/docs/source/markdown/podman-images.1.md.in index 5a1a47e7e9..61f916c1b8 100644 --- a/docs/source/markdown/podman-images.1.md.in +++ b/docs/source/markdown/podman-images.1.md.in @@ -122,6 +122,7 @@ Sort by *created*, *id*, *repository*, *size* or *tag* (default: **created**) ## EXAMPLE +List all non-dangling images in local storage: ``` $ podman images REPOSITORY TAG IMAGE ID CREATED SIZE @@ -131,12 +132,14 @@ registry.fedoraproject.org/fedora latest 2ecb6df95994 3 weeks ago 169 M quay.io/libpod/testimage 20220615 f26aa69bb3f3 2 months ago 8.4 MB ``` +List all images matching the specified name: ``` $ podman images stable REPOSITORY TAG IMAGE ID CREATED SIZE quay.io/podman/stable latest e0b7dabc3352 22 hours ago 331 MB ``` +List image ids of all images in containers storage: ``` # podman image ls --quiet e3d42bcaf643 @@ -144,6 +147,7 @@ ebb91b73692b 4526339ae51c ``` +List all images without showing the headers: ``` # podman images --noheading docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago 251 kB @@ -151,6 +155,7 @@ docker.io/kubernetes/pause latest e3d42bcaf643 3 years ago docker.io/library/ubuntu latest 4526339ae51c 6 weeks ago 126 MB ``` +List all images without truncating output: ``` # podman image list --no-trunc REPOSITORY TAG IMAGE ID CREATED SIZE @@ -159,6 +164,7 @@ docker.io/kubernetes/pause latest sha256:e3d42bcaf643097dd1b docker.io/library/ubuntu latest sha256:4526339ae51c3cdc97956a7a961c193c39dfc6bd9733b0d762a36c6881b5583a 6 weeks ago 126 MB ``` +List all image content with the formatted content: ``` # podman images --format "table {{.ID}} {{.Repository}} {{.Tag}}" IMAGE ID REPOSITORY TAG @@ -167,12 +173,14 @@ ebb91b73692b 4526339ae51c docker.io/library/ubuntu latest ``` +List 
any image that is not tagged with a name (dangling): ``` # podman images --filter dangling=true REPOSITORY TAG IMAGE ID CREATED SIZE ebb91b73692b 4 weeks ago 27.2 MB ``` +List all images in JSON format: ``` # podman images --format json [ @@ -206,6 +214,7 @@ REPOSITORY TAG IMAGE ID CREATED SIZE ] ``` +List all images sorted by the specified column: ``` # podman images --sort repository REPOSITORY TAG IMAGE ID CREATED SIZE @@ -216,6 +225,7 @@ registry.access.redhat.com/rhel7 latest 7a840db7f020 2 weeks ago registry.fedoraproject.org/fedora 27 801894bc0e43 6 weeks ago 246 MB ``` +Show the difference between listed images in use versus all images, including dangling images: ``` # podman images REPOSITORY TAG IMAGE ID CREATED SIZE diff --git a/docs/source/markdown/podman-import.1.md b/docs/source/markdown/podman-import.1.md index 60b34013c8..5669b920b9 100644 --- a/docs/source/markdown/podman-import.1.md +++ b/docs/source/markdown/podman-import.1.md @@ -52,6 +52,7 @@ Set variant of the imported image. ## EXAMPLES +Import the selected tarball into a new image, specifying the CMD, ENTRYPOINT and LABEL: ``` $ podman import --change CMD=/bin/bash --change ENTRYPOINT=/bin/sh --change LABEL=blue=image ctr.tar image-imported Getting image source signatures @@ -64,6 +65,7 @@ Storing signatures db65d991f3bbf7f31ed1064db9a6ced7652e3f8166c4736aa9133dadd3c7acb3 ``` +Import the selected tarball into a new image, specifying the ENTRYPOINT and LABEL: ``` $ podman import --change 'ENTRYPOINT ["/bin/sh","-c","test-image"]' --change LABEL=blue=image test-image.tar image-imported Getting image source signatures @@ -73,22 +75,14 @@ Writing manifest to image destination Storing signatures 110552350206337183ceadc0bdd646dc356e06514c548b69a8917b4182414b ``` -``` -$ podman import --change "CMD /bin/sh" --change LABEL=blue=image test-image.tar image-imported -Getting image source signatures -Copying blob e3b0c44298fc skipped: already exists -Copying config ae9a27e249 done -Writing manifest to image destination -Storing signatures -ae9a27e249f801aff11a4ba54a81751ea9fbc9db45a6df3f1bfd63fc2437bb9c -``` - +Import a new tagged image from stdin in quiet mode: ``` -$ cat ctr.tar | podman -q import --message "importing the ctr.tar tarball" - image-imported +$ cat ctr.tar | podman import -q --message "importing the ctr.tar file" - image-imported db65d991f3bbf7f31ed1064db9a6ced7652e3f8166c4736aa9133dadd3c7acb3 ``` +Import an image from stdin: ``` $ cat ctr.tar | podman import - Getting image source signatures @@ -101,6 +95,7 @@ Storing signatures db65d991f3bbf7f31ed1064db9a6ced7652e3f8166c4736aa9133dadd3c7acb3 ``` +Import a named image from a tarball via a URL: ``` $ podman import http://example.com/ctr.tar url-image Downloading from "http://example.com/ctr.tar" diff --git a/docs/source/markdown/podman-inspect.1.md.in b/docs/source/markdown/podman-inspect.1.md.in index 2a6333d5e0..bc3dbcdb1a 100644 --- a/docs/source/markdown/podman-inspect.1.md.in +++ b/docs/source/markdown/podman-inspect.1.md.in @@ -40,7 +40,7 @@ Return JSON for the specified type. Type can be 'container', 'image', 'volume', ## EXAMPLE -Inspect the fedora image +Inspect the fedora image: ``` # podman inspect fedora [ @@ -122,43 +122,43 @@ Inspect the fedora image ] ``` -Inspect the specified image for Image Name. +Inspect the specified image with the `ImageName` format specifier: ``` # podman inspect a04 --format "{{.ImageName}}" fedora ``` -Inspect the specified image for GraphDriver Name.
+Inspect the specified image with the `GraphDriver` format specifier: ``` # podman inspect a04 --format "{{.GraphDriver.Name}}" overlay ``` -Inspect the specified image for its Size field. +Inspect the specified image with the `Size` format specifier: ``` # podman image inspect --format "size: {{.Size}}" alpine size: 4405240 ``` -Inspect the latest container created for its EffectiveCaps field. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) +Inspect the latest container created with the `EffectiveCaps` format specifier. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines): ``` podman container inspect --latest --format {{.EffectiveCaps}} [CAP_CHOWN CAP_DAC_OVERRIDE CAP_FSETID CAP_FOWNER CAP_SETGID CAP_SETUID CAP_SETFCAP CAP_SETPCAP CAP_NET_BIND_SERVICE CAP_KILL] ``` -Inspect the specified pod for the Name field. +Inspect the specified pod for the `Name` format specifier: ``` # podman inspect myPod --type pod --format "{{.Name}}" myPod ``` -Inspect the specified volume for the Name field. +Inspect the specified volume for the `Name` format specifier: ``` # podman inspect myVolume --type volume --format "{{.Name}}" myVolume ``` -Inspect the specified network for the Name field. +Inspect the specified network for the `Name` format specifier: ``` # podman inspect nyNetwork --type network --format "{{.name}}" myNetwork diff --git a/docs/source/markdown/podman-kill.1.md.in b/docs/source/markdown/podman-kill.1.md.in index 2275e06ded..7d03024c37 100644 --- a/docs/source/markdown/podman-kill.1.md.in +++ b/docs/source/markdown/podman-kill.1.md.in @@ -24,33 +24,33 @@ Signal all running and paused containers. ## EXAMPLE -Kill container with a given name +Kill container with a given name: ``` podman kill mywebserver ``` -Kill container with a given ID +Kill container with a given ID: ``` podman kill 860a4b23 ``` -Terminate container by sending `TERM` signal +Terminate container by sending `TERM` signal: ``` podman kill --signal TERM 860a4b23 ``` Kill the latest container. (This option is not available with the remote Podman client, including Mac and Windows -(excluding WSL2) machines) +(excluding WSL2) machines): ``` podman kill --latest ``` -Terminate all containers by sending `KILL` signal +Terminate all containers by sending `KILL` signal: ``` podman kill --signal KILL -a ``` -Kill container using ID specified in a given files +Kill containers using IDs specified in the given files: ``` podman kill --cidfile /home/user/cidfile-1 podman kill --cidfile /home/user/cidfile-1 --cidfile ./cidfile-2 diff --git a/docs/source/markdown/podman-kube-generate.1.md b/docs/source/markdown/podman-kube-generate.1.md index 78f2bb1f70..f0f11e506f 100644 --- a/docs/source/markdown/podman-kube-generate.1.md +++ b/docs/source/markdown/podman-kube-generate.1.md @@ -39,11 +39,6 @@ Also note that both Deployment and DaemonSet can only have `restartPolicy` set t Output to the given file instead of STDOUT. If the file already exists, `kube generate` refuses to replace it and returns an error. -#### **--no-trunc** - -Don't truncate annotations to the Kubernetes maximum length of 63 characters.
-Note: enabling this flag means the generated YAML file is not Kubernetes compatible and can only be used with `podman kube play` - #### **--podman-only** Add podman-only reserved annotations in generated YAML file (Cannot be used by Kubernetes) diff --git a/docs/source/markdown/podman-kube-play.1.md.in b/docs/source/markdown/podman-kube-play.1.md.in index 93b62a4361..21a929a137 100644 --- a/docs/source/markdown/podman-kube-play.1.md.in +++ b/docs/source/markdown/podman-kube-play.1.md.in @@ -39,7 +39,7 @@ Note: When playing a kube YAML with init containers, the init container is creat Note: *hostPath* volume types created by kube play is given an SELinux shared label (z), bind mounts are not relabeled (use `chcon -t container_file_t -R `). -Note: To set userns of a pod, use the **io.podman.annotations.userns** annotation in the pod/deployment definition. This can be overridden with the `--userns` flag. +Note: To set userns of a pod, use the **io.podman.annotations.userns** annotation in the pod/deployment definition. For example, **io.podman.annotations.userns=keep-id** annotation tells Podman to create a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This can be overridden with the `--userns` flag. Note: Use the **io.podman.annotations.volumes-from** annotation to bind mount volumes of one container to another. You can mount volumes from multiple source containers to a target container. The source containers that belong to the same pod must be defined before the source container in the kube YAML. The annotation format is `io.podman.annotations.volumes-from/targetContainer: "sourceContainer1:mountOpts1;sourceContainer2:mountOpts2"`. @@ -158,6 +158,17 @@ spec: and as a result environment variable `FOO` is set to `bar` for container `container-1`. +`Automounting Volumes` + +An image can be automatically mounted into a container if the annotation `io.podman.annotations.kube.image.automount/$ctrname` is given. The following rules apply: + +- The image must already exist locally. +- The image must have at least 1 volume directive. +- The path given by the volume directive will be mounted from the image into the container. For example, an image with a volume at `/test/test_dir` will have `/test/test_dir` in the image mounted to `/test/test_dir` in the container. +- Multiple images can be specified. If multiple images have a volume at a specific path, the last image specified trumps. +- The images are always mounted read-only. +- Images to mount are defined in the annotation "io.podman.annotations.kube.image.automount/$ctrname" as a semicolon-separated list. They are mounted into a single container in the pod, not the whole pod. The annotation can be specified for additional containers if additional mounts are required. + ## OPTIONS @@option annotation.container @@ -217,10 +228,6 @@ When no network option is specified and *host* network mode is not configured in This option conflicts with host added in the Kubernetes YAML. -#### **--no-trunc** - -Use annotations that are not truncated to the Kubernetes maximum length of 63 characters - #### **--publish**=*[[ip:][hostPort]:]containerPort[/protocol]* Define or override a port definition in the YAML file. 
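To make the automount annotation described in the `podman kube play` section above more concrete, here is a minimal, hypothetical sketch; the file name `automount-demo.yaml`, the pod and container names, and the two `quay.io/example/...` images are placeholders, and the mounted image is assumed to already exist locally with at least one volume directive:

```
$ cat <<EOF > automount-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: automount-demo
  annotations:
    # Placeholder image; it must already exist locally and declare a VOLUME.
    # Its volume paths are mounted read-only into the "app" container.
    io.podman.annotations.kube.image.automount/app: "quay.io/example/tools:latest"
spec:
  containers:
  - name: app
    image: quay.io/example/app:latest
EOF
$ podman kube play automount-demo.yaml
```

Per the rules above, further images could be appended to the same annotation value separated by semicolons, with the last image listed winning if two images declare the same volume path.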
diff --git a/docs/source/markdown/podman-kube.1.md b/docs/source/markdown/podman-kube.1.md index 3a776ef892..36a3808443 100644 --- a/docs/source/markdown/podman-kube.1.md +++ b/docs/source/markdown/podman-kube.1.md @@ -10,6 +10,10 @@ podman\-kube - Play containers, pods or volumes based on a structured input file The kube command recreates containers, pods or volumes based on the input from a structured (like YAML) file input. Containers are automatically started. +Note: The kube commands in podman focus on simplifying the process of moving containers from podman to a Kubernetes +environment and from a Kubernetes environment back to podman. Podman is not replicating the kubectl CLI. Once containers +are deployed to a Kubernetes cluster from podman, please use `kubectl` to manage the workloads in the cluster. + ## COMMANDS | Command | Man Page | Description | diff --git a/docs/source/markdown/podman-login.1.md.in b/docs/source/markdown/podman-login.1.md.in index 999db987f2..ad94573423 100644 --- a/docs/source/markdown/podman-login.1.md.in +++ b/docs/source/markdown/podman-login.1.md.in @@ -69,15 +69,28 @@ print detailed information about credential store ## EXAMPLES -Add login credentials for specified registry to the default authorization file. +Add login credentials for specified registry to default authentication file; +note that unlike the `docker` default, the default credentials are under `$XDG_RUNTIME_DIR` +which is a subdirectory of `/run` (an ephemeral directory) and hence do not persist across reboot. + +``` +$ podman login quay.io +Username: umohnani +Password: +Login Succeeded! +``` + +To explicitly preserve credentials across reboot, you will need to specify +the default persistent path: + ``` -$ podman login docker.io +$ podman login --authfile ~/.config/containers/auth.json quay.io Username: umohnani Password: Login Succeeded! ``` -Add login credentials using specified username and password for local registry to the default authorization file. +Add login credentials using specified username and password for local registry to default authentication file. ``` $ podman login -u testuser -p testpassword localhost:5000 Login Succeeded! @@ -111,19 +124,19 @@ $ podman login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000 Login Succeeded! ``` -Add login credentials for docker.io into the default authorization file for user testuser with password information provided via stdin from a file on disk. +Add login credentials for specified registry to default authentication file for given user with password information provided via stdin from a file on disk. ``` $ podman login -u testuser --password-stdin < testpassword.txt docker.io Login Succeeded! ``` -Add login credentials for docker.io into the default authorization file for user testuser with password information provided via stdin from a pipe. +Add login credentials for specified registry to default authentication file for given user with password information provided via stdin from a pipe. ``` -$ echo $testpassword | podman login -u testuser --password-stdin docker.io +$ echo $testpassword | podman login -u testuser --password-stdin quay.io Login Succeeded! ``` -Add login credentials for the quay.io registry in verbose mode default authorization file.
``` $ podman login quay.io --verbose Username: myusername diff --git a/docs/source/markdown/podman-logout.1.md.in b/docs/source/markdown/podman-logout.1.md.in index 616cf2cc91..36ba717304 100644 --- a/docs/source/markdown/podman-logout.1.md.in +++ b/docs/source/markdown/podman-logout.1.md.in @@ -37,17 +37,17 @@ Print usage statement ## EXAMPLES -Remove login credentials for the docker.io registry from the authorization file +Remove login credentials for the docker.io registry from the authentication file: ``` $ podman logout docker.io ``` -Remove login credentials for the docker.io registry from the authdir/myauths.json file. +Remove login credentials for the docker.io registry from the authdir/myauths.json file: ``` $ podman logout --authfile authdir/myauths.json docker.io ``` -Remove login credentials for all registries. +Remove login credentials for all registries: ``` $ podman logout --all ``` diff --git a/docs/source/markdown/podman-machine-init.1.md.in b/docs/source/markdown/podman-machine-init.1.md.in index 17fde730d7..ae8a1bf639 100644 --- a/docs/source/markdown/podman-machine-init.1.md.in +++ b/docs/source/markdown/podman-machine-init.1.md.in @@ -73,11 +73,10 @@ Fully qualified path of the ignition file. If an ignition file is provided, the file is copied into the user's CONF_DIR and renamed. Additionally, no SSH keys are generated, nor are any system connections made. It is assumed that the user does these things manually or handled otherwise. -#### **--image-path** +#### **--image** -Fully qualified path or URL to the VM image. -Can also be set to `testing`, `next`, or `stable` to pull down default image. -Defaults to `testing`. +Fully qualified registry, path, or URL to a VM image. +Registry target must be in the form of `docker://registry/repo/image:version`. #### **--memory**, **-m**=*number* diff --git a/docs/source/markdown/podman-machine-inspect.1.md b/docs/source/markdown/podman-machine-inspect.1.md index fdf682c26a..2a5bb34639 100644 --- a/docs/source/markdown/podman-machine-inspect.1.md +++ b/docs/source/markdown/podman-machine-inspect.1.md @@ -25,14 +25,14 @@ Print results with a Go template. | **Placeholder** | **Description** | | ------------------- | --------------------------------------------------------------------- | -| .ConfigPath ... | Machine configuration file location | +| .ConfigDir ... | Machine configuration directory location | | .ConnectionInfo ... | Machine connection information | | .Created ... | Machine creation time (string, ISO3601) | -| .Image ... | Machine image config | | .LastUp ... | Time when machine was last booted | | .Name | Name of the machine | | .Resources ... | Resources used by the machine | | .Rootful | Whether the machine prefers rootful or rootless container execution | +| .Rosetta | Whether this machine uses Rosetta | | .SSHConfig ... 
| SSH configuration info for communicating with machine | | .State | Machine state | | .UserModeNetworking | Whether this machine uses user-mode networking | diff --git a/docs/source/markdown/podman-manifest-add.1.md.in b/docs/source/markdown/podman-manifest-add.1.md.in index 4b4012b8e2..4c9e6c09de 100644 --- a/docs/source/markdown/podman-manifest-add.1.md.in +++ b/docs/source/markdown/podman-manifest-add.1.md.in @@ -1,16 +1,18 @@ % podman-manifest-add 1 ## NAME -podman\-manifest\-add - Add an image to a manifest list or image index +podman\-manifest\-add - Add an image or artifact to a manifest list or image index ## SYNOPSIS -**podman manifest add** [*options*] *listnameorindexname* [*transport*]:*imagename* +**podman manifest add** [*options*] *listnameorindexname* [*transport*]:*imagename* *imageorartifactname* [...] ## DESCRIPTION -Adds the specified image to the specified manifest list or image index. +Adds the specified image to the specified manifest list or image index, or +creates an artifact manifest and adds it to the specified image index. ## RETURN VALUE + The list image's ID. ## OPTIONS @@ -24,13 +26,62 @@ from such a list or index is added to the list or index. Combining @@option annotation.manifest -#### **--arch** +#### **--arch**=*architecture* Override the architecture which the list or index records as a requirement for the image. If *imageName* refers to a manifest list or image index, the architecture information is retrieved from it. Otherwise, it is retrieved from the image's configuration information. +#### **--artifact** + +Create an artifact manifest and add it to the image index. Arguments after the +index name will be interpreted as file names rather than as image references. +In most scenarios, the **--artifact-type** option should also be specified. + +#### **--artifact-config**=*path* + +When creating an artifact manifest and adding it to the image index, use the +specified file's contents as the configuration blob in the artifact manifest. +In most scenarios, leaving the default value, which signifies an empty +configuration, unchanged, is the preferred option. + +#### **--artifact-config-type**=*type* + +When creating an artifact manifest and adding it to the image index, use the +specified MIME type as the `mediaType` associated with the configuration blob +in the artifact manifest. In most scenarios, leaving the default value, which +signifies either an empty configuration or the standard OCI configuration type, +unchanged, is the preferred option. + +#### **--artifact-exclude-titles** + +When creating an artifact manifest and adding it to the image index, do not +set "org.opencontainers.image.title" annotations equal to the file's basename +for each file added to the artifact manifest. Tools which retrieve artifacts +from a registry may use these values to choose names for files when saving +artifacts to disk, so this option is not recommended unless it is required +for interoperability with a particular registry. + +#### **--artifact-layer-type**=*type* + +When creating an artifact manifest and adding it to the image index, use the +specified MIME type as the `mediaType` associated with the files' contents. If +not specified, guesses based on either the files names or their contents will +be made and used, but the option should be specified if certainty is needed. 
+ +#### **--artifact-subject**=*imageName* + +When creating an artifact manifest and adding it to the image index, set the +*subject* field in the artifact manifest to mark the artifact manifest as being +associated with the specified image in some way. An artifact manifest can only +be associated with, at most, one subject. + +#### **--artifact-type**=*type* + +When creating an artifact manifest, use the specified MIME type as the +manifest's `artifactType` value instead of the less informative default value. + @@option authfile @@option cert-dir @@ -39,7 +90,7 @@ retrieved from the image's configuration information. @@option features -#### **--os** +#### **--os**=*OS* Override the OS which the list or index records as a requirement for the image. If *imagename* refers to a manifest list or image index, the OS information diff --git a/docs/source/markdown/podman-manifest-annotate.1.md.in b/docs/source/markdown/podman-manifest-annotate.1.md.in index 7af709bae3..38a186aeee 100644 --- a/docs/source/markdown/podman-manifest-annotate.1.md.in +++ b/docs/source/markdown/podman-manifest-annotate.1.md.in @@ -1,41 +1,53 @@ % podman-manifest-annotate 1 ## NAME -podman\-manifest\-annotate - Add or update information about an entry in a manifest list or image index +podman\-manifest\-annotate - Add and update information about an image or artifact in a manifest list or image index ## SYNOPSIS -**podman manifest annotate** [*options*] *listnameorindexname* *imagemanifestdigest* +**podman manifest annotate** [*options*] *listnameorindexname* *imagemanifestdigestorimageorartifactname* ## DESCRIPTION -Adds or updates information about an image included in a manifest list or image index. +Adds or updates information about an image or artifact included in a manifest list or image index. ## OPTIONS @@option annotation.manifest +If **--index** is also specified, sets the annotation on the entire image index. -#### **--arch** +#### **--arch**=*architecture* Override the architecture which the list or index records as a requirement for the image. This is usually automatically retrieved from the image's configuration information, so it is rarely necessary to use this option. - @@option features -#### **--os** +#### **--index** + +Treats arguments to the **--annotation** option as annotation values to be set +on the image index itself rather than on an entry in the image index. Implied +for **--subject**. + +#### **--os**=*OS* Override the OS which the list or index records as a requirement for the image. This is usually automatically retrieved from the image's configuration information, so it is rarely necessary to use this option. -#### **--os-features** +#### **--os-features**=*feature* Specify the OS features list which the list or index records as requirements for the image. This option is rarely used. @@option os-version +#### **--subject**=*imageName* + +Set the *subject* field in the image index to mark the image index as being +associated with the specified image in some way. An image index can only be +associated with, at most, one subject. + @@option variant.manifest ## EXAMPLE diff --git a/docs/source/markdown/podman-manifest-create.1.md.in b/docs/source/markdown/podman-manifest-create.1.md.in index 3d9e620970..9944357c40 100644 --- a/docs/source/markdown/podman-manifest-create.1.md.in +++ b/docs/source/markdown/podman-manifest-create.1.md.in @@ -28,6 +28,10 @@ If a manifest list named *listnameorindexname* already exists, modify the preexisting list instead of exiting with an error. 
The contents of *listnameorindexname* are not modified if no *imagename*s are given. +#### **--annotation**=*value* + +Set an annotation on the newly-created image index. + @@option tls-verify ## EXAMPLES diff --git a/docs/source/markdown/podman-manifest.1.md b/docs/source/markdown/podman-manifest.1.md index 26248a4c4f..8b6ef489ad 100644 --- a/docs/source/markdown/podman-manifest.1.md +++ b/docs/source/markdown/podman-manifest.1.md @@ -13,16 +13,16 @@ The `podman manifest` command provides subcommands which can be used to: ## SUBCOMMANDS -| Command | Man Page | Description | -| -------- | ------------------------------------------------------------ | --------------------------------------------------------------------------- | -| add | [podman-manifest-add(1)](podman-manifest-add.1.md) | Add an image to a manifest list or image index. | -| annotate | [podman-manifest-annotate(1)](podman-manifest-annotate.1.md) | Add or update information about an entry in a manifest list or image index. | -| create | [podman-manifest-create(1)](podman-manifest-create.1.md) | Create a manifest list or image index. | -| exists | [podman-manifest-exists(1)](podman-manifest-exists.1.md) | Check if the given manifest list exists in local storage | -| inspect | [podman-manifest-inspect(1)](podman-manifest-inspect.1.md) | Display a manifest list or image index. | -| push | [podman-manifest-push(1)](podman-manifest-push.1.md) | Push a manifest list or image index to a registry. | -| remove | [podman-manifest-remove(1)](podman-manifest-remove.1.md) | Remove an image from a manifest list or image index. | -| rm | [podman-manifest-rm(1)](podman-manifest-rm.1.md) | Remove manifest list or image index from local storage. | +| Command | Man Page | Description | +| -------- | ------------------------------------------------------------ | ---------------------------------------------------------------------------------------- | +| add | [podman-manifest-add(1)](podman-manifest-add.1.md) | Add an image or artifact to a manifest list or image index. | +| annotate | [podman-manifest-annotate(1)](podman-manifest-annotate.1.md) | Add and update information about an image or artifact in a manifest list or image index. | +| create | [podman-manifest-create(1)](podman-manifest-create.1.md) | Create a manifest list or image index. | +| exists | [podman-manifest-exists(1)](podman-manifest-exists.1.md) | Check if the given manifest list exists in local storage | +| inspect | [podman-manifest-inspect(1)](podman-manifest-inspect.1.md) | Display a manifest list or image index. | +| push | [podman-manifest-push(1)](podman-manifest-push.1.md) | Push a manifest list or image index to a registry. | +| remove | [podman-manifest-remove(1)](podman-manifest-remove.1.md) | Remove an image from a manifest list or image index. | +| rm | [podman-manifest-rm(1)](podman-manifest-rm.1.md) | Remove manifest list or image index from local storage. | ## EXAMPLES diff --git a/docs/source/markdown/podman-mount.1.md.in b/docs/source/markdown/podman-mount.1.md.in index cd535311f1..48e6ce4c8d 100644 --- a/docs/source/markdown/podman-mount.1.md.in +++ b/docs/source/markdown/podman-mount.1.md.in @@ -41,31 +41,32 @@ Do not truncate the output (default *false*). ## EXAMPLE +In rootful mode, Mount specified container. 
``` -podman mount c831414b10a3 - +# podman mount c831414b10a3 /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged ``` +In rootless mode, container mounting only works from inside the user namespace. +``` +$ podman unshare +# podman mount affectionate_mcnulty +/home/dwalsh/.local/share/containers/storage/overlay/4218326b9a80619aef005ff95067f76687ad975ce101c176598fb416f6186906/merged ``` -podman mount +List the currently mounted containers: +``` +podman mount c831414b10a3 /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged a7060253093b /var/lib/containers/storage/overlay/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged ``` + +Mount multiple containers: ``` podman mount c831414b10a3 a7060253093b - /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged /var/lib/containers/storage/overlay/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged ``` -``` -podman mount - -c831414b10a3 /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged -a7060253093b /var/lib/containers/storage/overlay/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged -``` - ## SEE ALSO **[podman(1)](podman.1.md)**, **[podman-unmount(1)](podman-unmount.1.md)**, **[podman-unshare(1)](podman-unshare.1.md)**, **mount(8)** diff --git a/docs/source/markdown/podman-network-inspect.1.md b/docs/source/markdown/podman-network-inspect.1.md index 7a3f0445b9..2350764485 100644 --- a/docs/source/markdown/podman-network-inspect.1.md +++ b/docs/source/markdown/podman-network-inspect.1.md @@ -16,6 +16,7 @@ Pretty-print networks to JSON or using a Go template. | **Placeholder** | **Description** | |--------------------|-------------------------------------------| +| .Containers ... | Running containers on this network. | | .Created ... | Timestamp when the network was created | | .DNSEnabled | Network has dns enabled (boolean) | | .Driver | Network driver | @@ -25,6 +26,7 @@ Pretty-print networks to JSON or using a Go template. | .IPv6Enabled | Network has ipv6 subnet (boolean) | | .Labels ... | Network labels | | .Name | Network name | +| .Network ... | Nested Network type | | .NetworkDNSServers | Array of DNS servers used in this network | | .NetworkInterface | Name of the network interface on the host | | .Options ... | Network options | diff --git a/docs/source/markdown/podman-network.1.md b/docs/source/markdown/podman-network.1.md index 03314fe0c7..0dedbb8dc4 100644 --- a/docs/source/markdown/podman-network.1.md +++ b/docs/source/markdown/podman-network.1.md @@ -41,10 +41,13 @@ Podman requires specific default IPs and, thus, network subnets. The default va ### Podman network The default bridge network (called `podman`) uses 10.88.0.0/16 as a subnet. When Podman runs as root, the `podman` network is used as default. It is the same as adding the option `--network bridge` or `--network podman`. This subnet can be changed in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** under the [network] section. Set the `default_subnet` to any subnet that is free in the environment. The name of the default network can also be changed from `podman` to another name using the default network key. Note that this is only done when no containers are running. 
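As a rough sketch of the containers.conf settings referenced in the paragraph above (the subnet shown is only an example; pick any range that is free in your environment):

```
$ cat /etc/containers/containers.conf
[network]
# Example values only: change the default bridge subnet and, optionally,
# the name of the default network.
default_subnet = "10.100.0.0/16"
default_network = "podman"
```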
-### Slirp4netns -When Podman is run as rootless, the internet connectivity is provided with slirp4netns by default. Slirp4nents uses 10.0.2.0/24 for its default network. This can also be changed in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** but under the `[engine]` section. Use the `network_cmd_options` key and add `["cidr=X.X.X.X/24"]` as a value. Note that slirp4netns needs a network prefix size between 1 and 25. This option accepts an array, so more options can be added in a comma-separated string as described on the **[podman-network-create(1)](podman-network-create.1.md)** man page. To change the CIDR for just one container, specify it on the cli using the `--network` option like this: `--network slirp4netns:cidr=192.168.1.0/24`. +### Pasta +Pasta by default performs no Network Address Translation (NAT) and copies the IPs from your main interface into the container namespace. If pasta cannot find an interface with the default route, it will select an interface if there is only one interface with a valid route. If you do not have a default route and several interfaces have defined routes, pasta will be unable to figure out the correct interface and it will fail to start. To specify the interface, use the `-i` option to pasta. A default set of pasta options can be set in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** under the `[network]` section with the `pasta_options` key. + +The default rootless networking tool can be selected in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** under the `[network]` section with `default_rootless_network_cmd`, which can be set to `pasta` (default) or `slirp4netns`. -When using the default network as rootless, i.e. --network podman/bridge, then it also uses the same subnet as described above in addition to the slirp4netns subnet. +### Slirp4netns +Slirp4netns uses 10.0.2.0/24 for its default network. This can also be changed in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** but under the `[engine]` section. Use the `network_cmd_options` key and add `["cidr=X.X.X.X/24"]` as a value. Note that slirp4netns needs a network prefix size between 1 and 25. This option accepts an array, so more options can be added in a comma-separated string as described on the **[podman-network-create(1)](podman-network-create.1.md)** man page. To change the CIDR for just one container, specify it on the cli using the `--network` option like this: `--network slirp4netns:cidr=192.168.1.0/24`. ### Podman network create When a new network is created with a `podman network create` command, and no subnet is given with the --subnet option, Podman starts picking a free subnet from 10.89.0.0/24 to 10.255.255.0/24. Use the `default_subnet_pools` option under the `[network]` section in **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** to change the range and/or size that is assigned by default. diff --git a/docs/source/markdown/podman-pod-exists.1.md b/docs/source/markdown/podman-pod-exists.1.md index 6923aadb2c..8ccc0b575b 100644 --- a/docs/source/markdown/podman-pod-exists.1.md +++ b/docs/source/markdown/podman-pod-exists.1.md @@ -14,20 +14,16 @@ was an issue accessing the local storage. ## EXAMPLES -Check if a pod called `web` exists in local storage (the pod does actually exist).
+Check if specified pod exists in local storage (the pod does actually exist): ``` -$ sudo podman pod exists web -$ echo $? +$ sudo podman pod exists web; echo $? 0 -$ ``` -Check if a pod called `backend` exists in local storage (the pod does not actually exist). +Check if specified pod exists in local storage (the pod does not actually exist): ``` -$ sudo podman pod exists backend -$ echo $? +$ sudo podman pod exists backend; echo $? 1 -$ ``` ## SEE ALSO diff --git a/docs/source/markdown/podman-pod-inspect.1.md.in b/docs/source/markdown/podman-pod-inspect.1.md.in index 3df31e91e5..463a064143 100644 --- a/docs/source/markdown/podman-pod-inspect.1.md.in +++ b/docs/source/markdown/podman-pod-inspect.1.md.in @@ -60,6 +60,8 @@ Valid placeholders for the Go template are listed below: @@option latest ## EXAMPLE + +Inspect specified pod: ``` # podman pod inspect foobar [ diff --git a/docs/source/markdown/podman-pod-kill.1.md.in b/docs/source/markdown/podman-pod-kill.1.md.in index 2c65ff5b47..75035c09f7 100644 --- a/docs/source/markdown/podman-pod-kill.1.md.in +++ b/docs/source/markdown/podman-pod-kill.1.md.in @@ -20,27 +20,27 @@ Sends signal to all containers associated with a pod. ## EXAMPLE -Kill pod with a given name +Kill pod with a given name: ``` podman pod kill mywebserver ``` -Kill pod with a given ID +Kill pod with a given ID: ``` podman pod kill 860a4b23 ``` -Terminate pod by sending `TERM` signal +Terminate pod by sending `TERM` signal: ``` podman pod kill --signal TERM 860a4b23 ``` -Kill the latest pod. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) +Kill the latest pod. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines): ``` podman pod kill --latest ``` -Terminate all pods by sending `KILL` signal +Terminate all pods by sending `KILL` signal: ``` podman pod kill --all ``` diff --git a/docs/source/markdown/podman-pod-logs.1.md.in b/docs/source/markdown/podman-pod-logs.1.md.in index 18298f11b9..dd07998aae 100644 --- a/docs/source/markdown/podman-pod-logs.1.md.in +++ b/docs/source/markdown/podman-pod-logs.1.md.in @@ -40,7 +40,7 @@ To view a pod's logs: podman pod logs -t podIdorName ``` -To view logs of a specific container on the pod +To view logs of a specific container on the pod: ``` podman pod logs -c ctrIdOrName podIdOrName ``` diff --git a/docs/source/markdown/podman-pod-pause.1.md b/docs/source/markdown/podman-pod-pause.1.md index 6519ae2e72..cf238ea482 100644 --- a/docs/source/markdown/podman-pod-pause.1.md +++ b/docs/source/markdown/podman-pod-pause.1.md @@ -21,12 +21,12 @@ Instead of providing the pod name or ID, pause the last created pod. (This optio ## EXAMPLE -Pause a pod with a given name +Pause a pod with a given name: ``` podman pod pause mywebserverpod ``` -Pause a pod with a given ID +Pause a pod with a given ID: ``` podman pod pause 860a4b23 ``` diff --git a/docs/source/markdown/podman-pod-restart.1.md b/docs/source/markdown/podman-pod-restart.1.md index 63a74d135e..6a8365e779 100644 --- a/docs/source/markdown/podman-pod-restart.1.md +++ b/docs/source/markdown/podman-pod-restart.1.md @@ -7,7 +7,7 @@ podman\-pod\-restart - Restart one or more pods **podman pod restart** [*options*] *pod* ... ## DESCRIPTION -Restart containers in one or more pods. Running containers are stopped an restarted. +Restart containers in one or more pods. Running containers are stopped and restarted. Stopped containers are started. 
You may use pod IDs or names as input. The pod ID is printed upon successful restart. When restarting multiple pods, an error from restarting one pod does not effect restarting other pods. @@ -24,26 +24,26 @@ Instead of providing the pod name or ID, restart the last created pod. (This opt ## EXAMPLE -Restart pod with a given name +Restart pod with a given name: ``` podman pod restart mywebserverpod cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907 ``` -Restart multiple pods with given IDs +Restart multiple pods with given IDs: ``` podman pod restart 490eb 3557fb 490eb241aaf704d4dd2629904410fe4aa31965d9310a735f8755267f4ded1de5 3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab ``` -Restart the last created pod +Restart the last created pod: ``` podman pod restart --latest 3557fbea6ad61569de0506fe037479bd9896603c31d3069a6677f23833916fab ``` -Restart all pods +Restart all pods: ``` podman pod restart --all 19456b4cd557eaf9629825113a552681a6013f8c8cad258e36ab825ef536e818 diff --git a/docs/source/markdown/podman-pod-rm.1.md.in b/docs/source/markdown/podman-pod-rm.1.md.in index 99fc2b0a37..3271fdeab1 100644 --- a/docs/source/markdown/podman-pod-rm.1.md.in +++ b/docs/source/markdown/podman-pod-rm.1.md.in @@ -32,28 +32,28 @@ The --force option must be specified to use the --time option. ## EXAMPLE -Remove pod with a given name +Remove pod with a given name: ``` podman pod rm mywebserverpod ``` -Remove multiple pods with given names and/or IDs +Remove multiple pods with given names and/or IDs: ``` podman pod rm mywebserverpod myflaskserverpod 860a4b23 ``` -Forcefully remove pod with a given ID +Forcefully remove pod with a given ID: ``` podman pod rm -f 860a4b23 ``` -Forcefully remove all pods +Forcefully remove all pods: ``` podman pod rm -f -a podman pod rm -fa ``` -Remove pod using ID specified in a given file +Remove pod using ID specified in a given file: ``` podman pod rm --pod-id-file /path/to/id/file ``` diff --git a/docs/source/markdown/podman-pod-start.1.md.in b/docs/source/markdown/podman-pod-start.1.md.in index fd59736472..c9785282da 100644 --- a/docs/source/markdown/podman-pod-start.1.md.in +++ b/docs/source/markdown/podman-pod-start.1.md.in @@ -22,27 +22,27 @@ Starts all pods ## EXAMPLE -Start pod with a given name +Start pod with a given name: ``` podman pod start mywebserverpod ``` -Start pods with given IDs +Start pods with given IDs: ``` podman pod start 860a4b23 5421ab4 ``` -Start the latest pod. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) +Start the latest pod. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines): ``` podman pod start --latest ``` -Start all pods +Start all pods: ``` podman pod start --all ``` -Start pod using ID specified in a given file +Start pod using ID specified in a given file: ``` podman pod start --pod-id-file /path/to/id/file ``` diff --git a/docs/source/markdown/podman-pod-stats.1.md.in b/docs/source/markdown/podman-pod-stats.1.md.in index 2dfb5cef0b..c9ebde8aeb 100644 --- a/docs/source/markdown/podman-pod-stats.1.md.in +++ b/docs/source/markdown/podman-pod-stats.1.md.in @@ -44,6 +44,7 @@ When using a Go template, precede the format with `table` to print headers. 
## EXAMPLE +List statistics about all pods without streaming: ``` # podman pod stats -a --no-stream ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS @@ -51,12 +52,14 @@ a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- - 3b33001239ee sleepy_stallman -- -- / -- -- -- / -- -- / -- -- ``` +List statistics about specified pod without streaming: ``` # podman pod stats --no-stream a9f80 ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- -- / -- 2 ``` +List statistics about specified pod in JSON format without streaming: ``` # podman pod stats --no-stream --format=json a9f80 [ @@ -73,6 +76,7 @@ a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- -- ] ``` +List selected statistics formatted in a table about specified pod: ``` # podman pod stats --no-stream --format "table {{.ID}} {{.Name}} {{.MemUsage}}" 6eae ID NAME MEM USAGE / LIMIT diff --git a/docs/source/markdown/podman-pod-unpause.1.md b/docs/source/markdown/podman-pod-unpause.1.md index f4b50275e1..d702505879 100644 --- a/docs/source/markdown/podman-pod-unpause.1.md +++ b/docs/source/markdown/podman-pod-unpause.1.md @@ -21,12 +21,12 @@ Instead of providing the pod name or ID, unpause the last created pod. (This opt ## EXAMPLE -Unpause pod with a given name +Unpause pod with a given name: ``` podman pod unpause mywebserverpod ``` -Unpause pod with a given ID +Unpause pod with a given ID: ``` podman pod unpause 860a4b23 ``` diff --git a/docs/source/markdown/podman-port.1.md.in b/docs/source/markdown/podman-port.1.md.in index 45ac1bf128..4dd10cbec3 100644 --- a/docs/source/markdown/podman-port.1.md.in +++ b/docs/source/markdown/podman-port.1.md.in @@ -21,7 +21,7 @@ List all known port mappings for running containers; when using this option, con ## EXAMPLE -List all port mappings +List all port mappings: ``` # podman port -a b4d2f05432e482e017b1a4b2eae15fa7b4f6fb7e9f65c1bde46294fdef285906 @@ -30,21 +30,21 @@ b4d2f05432e482e017b1a4b2eae15fa7b4f6fb7e9f65c1bde46294fdef285906 # ``` -List port mappings for a specific container +List port mappings for a specific container: ``` # podman port b4d2f054 80/udp -> 0.0.0.0:44327 80/tcp -> 0.0.0.0:44327 # ``` -List the port mappings for the latest container and port 80 +List the specified port mappings for a specific container: ``` # podman port b4d2f054 80 0.0.0.0:44327 # ``` -List the port mappings for a specific container for port 80 and the tcp protocol. +List the port mappings for a specific container for port 80 and the tcp protocol: ``` # podman port b4d2f054 80/tcp 0.0.0.0:44327 diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md index 7345e89bec..0c54d1c1d8 100644 --- a/docs/source/markdown/podman-ps.1.md +++ b/docs/source/markdown/podman-ps.1.md @@ -80,6 +80,7 @@ Valid placeholders for the Go template are listed below: | .ExitCode | Container exit code | | .Exited | "true" if container has exited | | .ExitedAt | Time (epoch seconds) that container exited | +| .ExposedPorts ... 
| Map of exposed ports on this container | | .ID | Container ID | | .Image | Image Name/ID | | .ImageID | Image ID | @@ -92,7 +93,7 @@ Valid placeholders for the Go template are listed below: | .Pid | Process ID on host system | | .Pod | Pod the container is associated with (SHA) | | .PodName | PodName of the container | -| .Ports | Exposed ports | +| .Ports | Forwarded and exposed ports | | .Restarts | Display the container restart count | | .RunningFor | Time elapsed since container was started | | .Size | Size of container | diff --git a/docs/source/markdown/podman-pull.1.md.in b/docs/source/markdown/podman-pull.1.md.in index 2046cbe9b0..f42981b36d 100644 --- a/docs/source/markdown/podman-pull.1.md.in +++ b/docs/source/markdown/podman-pull.1.md.in @@ -210,8 +210,10 @@ Storing signatures ``` Pull an image with up to 6 retries, delaying 10 seconds between retries in quet mode. +``` $ podman --remote pull -q --retry 6 --retry-delay 10s ubi9 4d6addf62a90e392ff6d3f470259eb5667eab5b9a8e03d20b41d0ab910f92170 +``` ## SEE ALSO **[podman(1)](podman.1.md)**, **[podman-push(1)](podman-push.1.md)**, **[podman-login(1)](podman-login.1.md)**, **[containers-certs.d(5)](https://github.com/containers/image/blob/main/docs/containers-certs.d.5.md)**, **[containers-registries.conf(5)](https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md)**, **[containers-transports(5)](https://github.com/containers/image/blob/main/docs/containers-transports.5.md)** diff --git a/docs/source/markdown/podman-push.1.md.in b/docs/source/markdown/podman-push.1.md.in index ffa1de761c..990783bccb 100644 --- a/docs/source/markdown/podman-push.1.md.in +++ b/docs/source/markdown/podman-push.1.md.in @@ -84,6 +84,10 @@ When writing the output image, suppress progress output Discard any pre-existing signatures in the image. +@@option retry + +@@option retry-delay + #### **--sign-by**=*key* Add a “simple signing” signature at the destination using the specified key. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) @@ -103,29 +107,32 @@ Add a sigstore signature at the destination using a private key at the specified ## EXAMPLE -This example pushes the image specified by the imageID to a local directory in docker format. - - `# podman push imageID dir:/path/to/image` - -This example pushes the image specified by the imageID to a local directory in oci format. - - `# podman push imageID oci-archive:/path/to/layout:image:tag` - -This example pushes the image specified by the imageID to a container registry named registry.example.com - - `# podman push imageID docker://registry.example.com/repository:tag` - -This example pushes the image specified by the imageID to a container registry named registry.example.com and saves the digest in the specified digestfile. 
+Push the specified image to a local directory: +``` +# podman push imageID dir:/path/to/image +``` - `# podman push --digestfile=/tmp/mydigest imageID docker://registry.example.com/repository:tag` +Push the specified image to an OCI archive: +``` +# podman push imageID oci-archive:/path/to/layout:image:tag +``` -This example pushes the image specified by the imageID and puts it into the local docker container store +Push the specified image to a container registry: +``` +# podman push imageID docker://registry.example.com/repository:tag +``` - `# podman push imageID docker-daemon:image:tag` +Push the specified image to a container registry and save the digest in the specified file: +``` +# podman push --digestfile=/tmp/mydigest imageID docker://registry.example.com/repository:tag +``` -This example pushes the alpine image to umohnani/alpine on dockerhub and reads the creds from -the path given to --authfile +Push the specified image into the local Docker daemon container store: +``` +# podman push imageID docker-daemon:image:tag +``` +Push the specified image with a different image name using credentials from an alternate authfile path: ``` # podman push --authfile temp-auths/myauths.json alpine docker://docker.io/umohnani/alpine Getting image source signatures @@ -137,7 +144,7 @@ Writing manifest to image destination Storing signatures ``` -This example pushes the rhel7 image to rhel7-dir with the "oci" manifest type +Push the specified image to a local directory as an OCI image: ``` # podman push --format oci registry.access.redhat.com/rhel7 dir:rhel7-dir Getting image source signatures diff --git a/docs/source/markdown/podman-run.1.md.in b/docs/source/markdown/podman-run.1.md.in index 7756f0c77c..879dcde569 100644 --- a/docs/source/markdown/podman-run.1.md.in +++ b/docs/source/markdown/podman-run.1.md.in @@ -346,6 +346,10 @@ Suppress output information when pulling images @@option restart +@@option retry + +@@option retry-delay + #### **--rm** Automatically remove the container and any anonymous unnamed volume associated with diff --git a/docs/source/markdown/podman-secret-create.1.md b/docs/source/markdown/podman-secret-create.1.md index 0cd6f290cf..68c18fdbb8 100644 --- a/docs/source/markdown/podman-secret-create.1.md +++ b/docs/source/markdown/podman-secret-create.1.md @@ -94,8 +94,14 @@ Create gpg encrypted secret based on local file using the pass driver. $ podman secret create --driver=pass my_secret ./secret.txt.gpg ``` +Create a secret from an environment variable called 'MYSECRET': +``` +$ podman secret create --env=true my_secret MYSECRET +``` + ## SEE ALSO **[podman(1)](podman.1.md)**, **[podman-secret(1)](podman-secret.1.md)**, **[podman-login(1)](podman-login.1.md)** ## HISTORY January 2021, Originally compiled by Ashley Cui +February 2024, Added example showing secret creation from an environment variable by Brett Calliss diff --git a/docs/source/markdown/podman-secret-ls.1.md.in b/docs/source/markdown/podman-secret-ls.1.md.in index c9fb430898..a83a4be6b9 100644 --- a/docs/source/markdown/podman-secret-ls.1.md.in +++ b/docs/source/markdown/podman-secret-ls.1.md.in @@ -61,7 +61,7 @@ List the name field of all secrets. $ podman secret ls --format "{{.Name}}" ``` -List all secrets whose name includes the the specified string. +List all secrets whose name includes the specified string.
``` $ podman secret ls --filter name=confidential ``` diff --git a/docs/source/markdown/podman-stats.1.md.in b/docs/source/markdown/podman-stats.1.md.in index 8bb3a3557f..0c811198b1 100644 --- a/docs/source/markdown/podman-stats.1.md.in +++ b/docs/source/markdown/podman-stats.1.md.in @@ -79,6 +79,7 @@ Do not truncate output ## EXAMPLE +List statistics about all running containers without streaming mode: ``` # podman stats -a --no-stream ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS @@ -86,18 +87,21 @@ a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- - 3b33001239ee sleepy_stallman -- -- / -- -- -- / -- -- / -- -- ``` +List the specified container's statistics in streaming mode: ``` -# podman stats --no-stream a9f80 +# podman stats a9f80 ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- -- / -- 2 ``` +List the specified statistics about the specified container in table format: ``` $ podman stats --no-trunc 3667 --format 'table {{ .ID }} {{ .MemUsage }}' ID MEM USAGE / LIMIT 3667c6aacb06aac2eaffce914c01736420023d56ef9b0f4cfe58b6d6a78b7503 49.15kB / 67.17GB ``` +List the specified container statistics in JSON format: ``` # podman stats --no-stream --format=json a9f80 [ @@ -114,6 +118,7 @@ ID MEM USAGE / LI ] ``` +List the specified container statistics in table format: ``` # podman stats --no-stream --format "table {{.ID}} {{.Name}} {{.MemUsage}}" 6eae ID NAME MEM USAGE / LIMIT diff --git a/docs/source/markdown/podman-system-check.1.md b/docs/source/markdown/podman-system-check.1.md new file mode 100644 index 0000000000..1abe83ce6c --- /dev/null +++ b/docs/source/markdown/podman-system-check.1.md @@ -0,0 +1,59 @@ +% podman-system-check 1 + +## NAME +podman\-system\-check - Perform consistency checks on image and container storage + +## SYNOPSIS +**podman system check** [*options*] + +## DESCRIPTION +Perform consistency checks on image and container storage, reporting images and +containers which have identified issues. + +## OPTIONS + +#### **--force**, **-f** + +When attempting to remove damaged images, also remove containers which depend +on those images. By default, damaged images which are being used by containers +are left alone. + +Containers which depend on damaged images do so regardless of which engine +created them, but because podman only "knows" how to shut down containers that +it started, the effect on still-running containers which were started by other +engines is difficult to predict. + +#### **--max**, **-m**=*duration* + +When considering layers which are not used by any images or containers, assume +that any layers which are more than *duration* old are the results of canceled +attempts to pull images, and should be treated as though they are damaged. + +#### **--quick**, **-q** + +Skip checks which are known to be time-consuming. This will prevent some types +of errors from being detected. + +#### **--repair**, **-r** + +Remove any images which are determined to have been damaged in some way, unless +they are in use by containers. Use **--force** to remove containers which +depend on damaged images, and those damaged images, as well. 
+ +## EXAMPLE + +A reasonably quick check: +``` +podman system check --quick --repair --force +``` + +A more thorough check: +``` +podman system check --repair --max=1h --force +``` + +## SEE ALSO +**[podman(1)](podman.1.md)**, **[podman-system(1)](podman-system.1.md)** + +## HISTORY +April 2024 diff --git a/docs/source/markdown/podman-system-connection-add.1.md b/docs/source/markdown/podman-system-connection-add.1.md index 5ac265d098..2e50d21ad2 100644 --- a/docs/source/markdown/podman-system-connection-add.1.md +++ b/docs/source/markdown/podman-system-connection-add.1.md @@ -36,13 +36,24 @@ Port for ssh destination. The default value is `22`. Path to the Podman service unix domain socket on the ssh destination host ## EXAMPLE + +Add a named system connection: ``` $ podman system connection add QA podman.example.com +``` +Add a system connection using SSH data: +``` $ podman system connection add --identity ~/.ssh/dev_rsa production ssh://root@server.example.com:2222 +``` +Add a named system connection to local Unix domain socket: +``` $ podman system connection add testing unix:///run/podman/podman.sock +``` +Add a named system connection to local tcp socket: +``` $ podman system connection add debug tcp://localhost:8080 ``` ## SEE ALSO diff --git a/docs/source/markdown/podman-system-connection-default.1.md b/docs/source/markdown/podman-system-connection-default.1.md index 574625cef2..465801a5d3 100644 --- a/docs/source/markdown/podman-system-connection-default.1.md +++ b/docs/source/markdown/podman-system-connection-default.1.md @@ -10,6 +10,8 @@ podman\-system\-connection\-default - Set named destination as default for the P Set named ssh destination as default destination for the Podman service. ## EXAMPLE + +Set the specified connection as default: ``` $ podman system connection default production ``` diff --git a/docs/source/markdown/podman-system-connection-list.1.md b/docs/source/markdown/podman-system-connection-list.1.md index edd2372122..ed0e94acd1 100644 --- a/docs/source/markdown/podman-system-connection-list.1.md +++ b/docs/source/markdown/podman-system-connection-list.1.md @@ -31,6 +31,8 @@ Valid placeholders for the Go template listed below: Only show connection names ## EXAMPLE + +List system connections: ``` $ podman system connection list Name URI Identity Default ReadWrite diff --git a/docs/source/markdown/podman-system-connection-remove.1.md b/docs/source/markdown/podman-system-connection-remove.1.md index 45fede0215..ad351e9296 100644 --- a/docs/source/markdown/podman-system-connection-remove.1.md +++ b/docs/source/markdown/podman-system-connection-remove.1.md @@ -16,6 +16,8 @@ Delete named ssh destination. Remove all connections. ## EXAMPLE + +Remove the specified system connection: ``` $ podman system connection remove production ``` diff --git a/docs/source/markdown/podman-system-connection-rename.1.md b/docs/source/markdown/podman-system-connection-rename.1.md index ed10c1cd6f..296abdbde0 100644 --- a/docs/source/markdown/podman-system-connection-rename.1.md +++ b/docs/source/markdown/podman-system-connection-rename.1.md @@ -10,6 +10,8 @@ podman\-system\-connection\-rename - Rename the destination for Podman service Rename ssh destination from *old* to *new*. 
## EXAMPLE + +Rename the specified connection: ``` $ podman system connection rename laptop devel ``` diff --git a/docs/source/markdown/podman-system-connection.1.md b/docs/source/markdown/podman-system-connection.1.md index f126d3b116..c76985b9bf 100644 --- a/docs/source/markdown/podman-system-connection.1.md +++ b/docs/source/markdown/podman-system-connection.1.md @@ -33,6 +33,8 @@ not be edited with the **podman system connection** commands. | rename | [podman-system-connection\-rename(1)](podman-system-connection-rename.1.md) | Rename the destination for Podman service | ## EXAMPLE + +List system connections: ``` $ podman system connection list Name URI Identity Default ReadWrite diff --git a/docs/source/markdown/podman-system-df.1.md b/docs/source/markdown/podman-system-df.1.md index 8b8e779841..a2dc5c6f39 100644 --- a/docs/source/markdown/podman-system-df.1.md +++ b/docs/source/markdown/podman-system-df.1.md @@ -31,13 +31,18 @@ Valid placeholders for the Go template are listed below: Show detailed information on space usage ## EXAMPLE + +Show disk usage: ``` $ podman system df TYPE TOTAL ACTIVE SIZE RECLAIMABLE Images 6 2 281MB 168MB (59%) Containers 3 1 0B 0B (0%) Local Volumes 1 1 22B 0B (0%) +``` +Show disk usage in verbose mode: +``` $ podman system df -v Images space usage: diff --git a/docs/source/markdown/podman-system.1.md b/docs/source/markdown/podman-system.1.md index 103b877aa2..0f20ced2c4 100644 --- a/docs/source/markdown/podman-system.1.md +++ b/docs/source/markdown/podman-system.1.md @@ -13,6 +13,7 @@ The system command allows management of the podman systems | Command | Man Page | Description | | ------- | ------------------------------------------------------------ | ------------------------------------------------------------------------ | +| check | [podman-system-check(1)](podman-system-check.1.md) | Perform consistency checks on image and container storage. | connection | [podman-system-connection(1)](podman-system-connection.1.md) | Manage the destination(s) for Podman service(s) | | df | [podman-system-df(1)](podman-system-df.1.md) | Show podman disk usage. | | events | [podman-events(1)](podman-events.1.md) | Monitor Podman events | diff --git a/docs/source/markdown/podman-systemd.unit.5.md b/docs/source/markdown/podman-systemd.unit.5.md index b392829e7a..7323e6d227 100644 --- a/docs/source/markdown/podman-systemd.unit.5.md +++ b/docs/source/markdown/podman-systemd.unit.5.md @@ -6,14 +6,18 @@ podman\-systemd.unit - systemd units using Podman Quadlet ## SYNOPSIS -*name*.container, *name*.volume, *name*.network, *name*.kube *name*.image, *name*.pod +*name*.container, *name*.volume, *name*.network, *name*.kube *name*.image, *name*.build *name*.pod -### Podman unit search path +### Podman rootful unit search path + +Quadlet files for the root user can be placed in the following two directories: * /etc/containers/systemd/ * /usr/share/containers/systemd/ -### Podman user unit search path +### Podman rootless unit search path + +Quadlet files for non-root users can be placed in the following directories * $XDG_CONFIG_HOME/containers/systemd/ or ~/.config/containers/systemd/ * /etc/containers/systemd/users/$(UID) @@ -26,7 +30,7 @@ Symbolic links below the search paths are not supported. 
## DESCRIPTION
-Podman supports starting containers (and creating volumes) via systemd by using a
+Podman supports building and starting containers (and creating volumes) via systemd by using a
 [systemd generator](https://www.freedesktop.org/software/systemd/man/systemd.generator.html).
 These files are read during boot (and when `systemctl daemon-reload` is run) and generate
 corresponding regular systemd service unit files. Both system and user systemd units are supported.
@@ -35,7 +39,7 @@ the [Service] table and [Install] tables pass directly to systemd and are handle
 See systemd.unit(5) man page for more information.

 The Podman generator reads the search paths above and reads files with the extensions `.container`
-`.volume`, `.network`, `.pod` and `.kube`, and for each file generates a similarly named `.service` file. Be aware that
+`.volume`, `.network`, `.build`, `.pod` and `.kube`, and for each file generates a similarly named `.service` file. Be aware that
 existing vendor services (i.e., in `/usr/`) are replaced if they have the same name. The generated unit files can
 be started and managed with `systemctl` like any other systemd service. `systemctl {--user} list-unit-files`
 lists existing unit files on the system.
@@ -61,9 +65,12 @@ session gets started.
 For unit files placed in subdirectories within /etc/containers/systemd/user/${UID}/ and the other user unit search paths,
 Quadlet will recursively search and run the unit files present in these subdirectories.

-Note: When a Quadlet is starting, Podman often pulls one more container images which may take a considerable amount of time.
+Note: When a Quadlet is starting, Podman often pulls or builds one or more container images, which may take a considerable amount of time.
 Systemd defaults service start time to 90 seconds, or fails the service.
 Pre-pulling the image or extending the systemd timeout time for the service using the *TimeoutStartSec* Service option can fix the problem.
+A word of caution: *TimeoutStartSec* is not available for `Type=oneshot` units. Refer to `systemd.service(5)`
+for more information on how to handle long startup times for units which do not need to stay active
+once their main process has finished.

 Adding the following snippet to a Quadlet file extends the systemd timeout to 15 minutes.

@@ -78,13 +85,15 @@ Quadlet requires the use of cgroup v2, use `podman info --format {{.Host.Cgroups
 By default, the `Type` field of the `Service` section of the Quadlet file does not need to be set.
 Quadlet will set it to `notify` for `.container` and `.kube` files,
-`forking` for `.pod` files, and `oneshot` for `.volume`, `.network` and `.image` files.
+`forking` for `.pod` files, and `oneshot` for `.volume`, `.network`, `.build`, and `.image` files.
 However, `Type` may be explicitly set to `oneshot` for `.container` and `.kube` files when no containers
 are expected to run once `podman` exits.

 When setting `Type=oneshot`, it is recommended to also set `RemainAfterExit=yes` to prevent the service state
-from becoming `inactive (dead)`
+from becoming `inactive (dead)`. However, when activating a service via a timer unit, having `RemainAfterExit=yes`
+leaves the job in a "started" state which prevents subsequent activations by the timer. For more information, see the
+`systemd.service(5)` man page.
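+As a purely illustrative sketch (the unit, image, and command names below are made up, not taken from this page),
+such a one-shot `.container` unit might look like:
+
+```
+# one-time-job.container -- a container that runs a single task and exits
+[Container]
+Image=quay.io/example/one-time-job:latest
+Exec=/usr/bin/run-job
+
+[Service]
+# No container is expected to keep running once podman exits
+Type=oneshot
+# Keep the unit from ending up as `inactive (dead)`;
+# avoid this when the unit is activated by a timer, as noted above
+RemainAfterExit=yes
+```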
Examples for such cases: - `.container` file with an image that exits after their entrypoint has finished @@ -181,6 +190,13 @@ create a drop-in file like `sleep@10.container.d/10-image.conf`: Image=quay.io/centos/centos ``` +### Relative paths + +In order to support Systemd specifiers, Quadlet does not resolve relative paths that start with `%`. +To resolve such a path, prepend it with `./`. + +For example, instead of `EnvironmentFile=%n/env` use `EnvironmentFile=./%n/env` + ### Debugging unit files After placing the unit file in one of the unit search paths (mentioned @@ -244,6 +260,7 @@ Valid options for `[Container]` are listed below: | GIDMap=0:10000:10 | --gidmap=0:10000:10 | | GlobalArgs=--log-level=debug | --log-level=debug | | Group=1234 | --user UID:1234 | +| GroupAdd=keep-groups | --group-add=keep-groups | | HealthCmd=/usr/bin/command | --health-cmd=/usr/bin/command | | HealthInterval=2m | --health-interval=2m | | HealthOnFailure=kill | --health-on-failure=kill | @@ -261,6 +278,7 @@ Valid options for `[Container]` are listed below: | IP6=2001:db8::1 | --ip6 2001:db8::1 | | Label="XYZ" | --label "XYZ" | | LogDriver=journald | --log-driver journald | +| LogOpt=path=/var/log/mykube\.json | --log-opt path=/var/log/mykube\.json | | Mask=/proc/sys/foo\:/proc/sys/bar | --security-opt mask=/proc/sys/foo:/proc/sys/bar | | Mount=type=... | --mount type=... | | Network=host | --net host | @@ -436,6 +454,11 @@ This key can be listed multiple times. The (numeric) GID to run as inside the container. This does not need to match the GID on the host, which can be modified with `UsersNS`, but if that is not specified, this GID is also used on the host. +### `GroupAdd=` + +Assign additional groups to the primary user running within the container process. Also supports the `keep-groups` special flag. +Equivalent to the Podman `--group-add` option. + ### `HealthCmd=` Set or alter a healthcheck command for a container. A value of none disables existing healthchecks. @@ -535,6 +558,12 @@ This key can be listed multiple times. Set the log-driver used by Podman when running the container. Equivalent to the Podman `--log-driver` option. +### `LogOpt=` + +Set the log-opt (logging options) used by Podman when running the container. +Equivalent to the Podman `--log-opt` option. +This key can be listed multiple times. + ### `Mask=` Specify the paths to mask separated by a colon. `Mask=/path/1:/path/2`. A masked path cannot be accessed inside the container. @@ -565,12 +594,12 @@ created by using a `$name.network` Quadlet file. This key can be listed multiple times. -### `NoNewPrivileges=` (defaults to `no`) +### `NoNewPrivileges=` (defaults to `false`) If enabled, this disables the container processes from gaining additional privileges via things like setuid and file capabilities. -### `Notify=` (defaults to `no`) +### `Notify=` (defaults to `false`) By default, Podman is run in such a way that the systemd startup notify command is handled by the container runtime. In other words, the service is deemed started when the container runtime @@ -631,13 +660,13 @@ This key can be listed multiple times. Set the image pull policy. This is equivalent to the Podman `--pull` option -### `ReadOnly=` (defaults to `no`) +### `ReadOnly=` (defaults to `false`) If enabled, makes the image read-only. -### `ReadOnlyTmpfs=` (defaults to `yes`) +### `ReadOnlyTmpfs=` (defaults to `true`) -If ReadOnly is set to `yes`, mount a read-write tmpfs on /dev, /dev/shm, /run, /tmp, and /var/tmp. 
+If ReadOnly is set to `true`, mount a read-write tmpfs on /dev, /dev/shm, /run, /tmp, and /var/tmp. ### `Rootfs=` @@ -647,7 +676,7 @@ The format of the rootfs is the same as when passed to `podman run --rootfs`, so Note: On SELinux systems, the rootfs needs the correct label, which is by default unconfined_u:object_r:container_file_t:s0. -### `RunInit=` (default to `no`) +### `RunInit=` (default to `false`) If enabled, the container has a minimal init process inside the container that forwards signals and reaps processes. @@ -873,6 +902,8 @@ Note that not listing a host port means that Podman automatically selects one, a may be different for each invocation of service. This makes that a less useful option. The allocated port can be found with the `podman port` command. +When using `host` networking via `Network=host`, the `PublishPort=` option cannot be used. + This key can be listed multiple times. ### `Volume=` @@ -1082,7 +1113,7 @@ Load the specified containers.conf(5) module. Equivalent to the Podman `--module This key can be listed multiple times. -### `DisableDNS=` (defaults to `no`) +### `DisableDNS=` (defaults to `false`) If enabled, disables the DNS plugin for this network. @@ -1120,7 +1151,7 @@ escaped to allow inclusion of whitespace and other control characters. This key can be listed multiple times. -### `Internal=` (defaults to `no`) +### `Internal=` (defaults to `false`) Restrict external access of this network. @@ -1215,7 +1246,7 @@ Valid options for `[Volume]` are listed below: | Group=192 | --opt group=192 | | Image=quay.io/centos/centos\:latest | --opt image=quay.io/centos/centos\:latest | | Label="foo=bar" | --label "foo=bar" | -| Options=XYZ | --opt XYZ | +| Options=XYZ | --opt "o=XYZ" | | PodmanArgs=--driver=image | --driver=image | | Type=type | Filesystem type of Device | | User=123 | --opt uid=123 | @@ -1229,7 +1260,7 @@ Load the specified containers.conf(5) module. Equivalent to the Podman `--module This key can be listed multiple times. -### `Copy=` (default to `yes`) +### `Copy=` (default to `true`) If enabled, the content of the image located at the mountpoint of the volume is copied into the volume on the first run. @@ -1312,6 +1343,251 @@ The (optional) name of the Podman volume. If this is not specified, the default `systemd-%N` is used, which is the same as the unit name but with a `systemd-` prefix to avoid conflicts with user-managed volumes. +## Build units [Build] + +Build files are named with a `.build` extension and contain a section `[Build]` describing the image +build command. The generated service is a one-time command that ensures that the image is built on +the host from a supplied Containerfile and context directory. Subsequent (re-)starts of the +generated built service will usually finish quickly, as image layer caching will skip unchanged +build steps. + +A minimal `.build` unit needs at least the `ImageTag=` key, and either of `File=` or +`SetWorkingDirectory=` keys. + +Using build units allows containers and volumes to depend on images being built locally. This can be +interesting for creating container images not available on container registries, or for local +testing and development. 
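+As a minimal sketch (the image name and directory below are hypothetical), a `.build` unit that builds from a
+fixed context directory containing a Containerfile might look like:
+
+```
+# webapp.build -- build a local image from /opt/webapp
+[Build]
+ImageTag=localhost/webapp:latest
+# Context directory; podman build expects a Containerfile inside it
+SetWorkingDirectory=/opt/webapp
+```
+
+A fuller example, which builds from the directory of the unit file itself and is then consumed by a
+`.container` unit, is shown in the examples further down this page.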
+
+Valid options for `[Build]` are listed below:
+
+| **[Build] options** | **podman build equivalent** |
+|-------------------------------------|---------------------------------------------|
+| Annotation=annotation=value | --annotation=annotation=value |
+| Arch=aarch64 | --arch=aarch64 |
+| AuthFile=/etc/registry/auth\.json | --authfile=/etc/registry/auth\.json |
+| ContainersConfModule=/etc/nvd\.conf | --module=/etc/nvd\.conf |
+| DNS=192.168.55.1 | --dns=192.168.55.1 |
+| DNSOption=ndots:1 | --dns-option=ndots:1 |
+| DNSSearch=foo.com | --dns-search=foo.com |
+| Environment=foo=bar | --env foo=bar |
+| File=/path/to/Containerfile | --file=/path/to/Containerfile |
+| ForceRM=false | --force-rm=false |
+| GlobalArgs=--log-level=debug | --log-level=debug |
+| GroupAdd=keep-groups | --group-add=keep-groups |
+| ImageTag=localhost/imagename | --tag=localhost/imagename |
+| Label=label | --label=label |
+| Network=host | --network=host |
+| PodmanArgs=--add-host foobar | --add-host foobar |
+| Pull=never | --pull=never |
+| Secret=secret | --secret=id=mysecret,src=path |
+| SetWorkingDirectory=unit | Set `WorkingDirectory` of systemd unit file |
+| Target=my-app | --target=my-app |
+| TLSVerify=false | --tls-verify=false |
+| Variant=arm/v7 | --variant=arm/v7 |
+| Volume=/source:/dest | --volume /source:/dest |
+
+### `Annotation=`
+
+Add an image *annotation* (e.g. annotation=*value*) to the image metadata. Can be used multiple
+times.
+
+This is equivalent to the `--annotation` option of `podman build`.
+
+### `Arch=`
+
+Override the architecture, which defaults to the host's, of the image to be built.
+
+This is equivalent to the `--arch` option of `podman build`.
+
+### `AuthFile=`
+
+Path of the authentication file.
+
+This is equivalent to the `--authfile` option of `podman build`.
+
+### `ContainersConfModule=`
+
+Load the specified containers.conf(5) module. Equivalent to the Podman `--module` option.
+
+This key can be listed multiple times.
+
+### `DNS=`
+
+Set network-scoped DNS resolver/nameserver for the build container.
+
+This key can be listed multiple times.
+
+This is equivalent to the `--dns` option of `podman build`.
+
+### `DNSOption=`
+
+Set custom DNS options.
+
+This key can be listed multiple times.
+
+This is equivalent to the `--dns-option` option of `podman build`.
+
+### `DNSSearch=`
+
+Set custom DNS search domains. Use **DNSSearch=.** to remove the search domain.
+
+This key can be listed multiple times.
+
+This is equivalent to the `--dns-search` option of `podman build`.
+
+### `Environment=`
+
+Add a value (e.g. env=*value*) to the built image. This uses the same format as [services in
+systemd](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Environment=) and can be
+listed multiple times.
+
+### `File=`
+
+Specifies a Containerfile which contains instructions for building the image. A URL starting with
+`http(s)://` allows you to specify a remote Containerfile to be downloaded. Note that for a given
+relative path to a Containerfile, or when using a `http(s)://` URL, you also must set
+`SetWorkingDirectory=` in order for `podman build` to find a valid context directory for the
+resources specified in the Containerfile.
+
+Note that setting a `File=` field is mandatory for a `.build` file, unless `SetWorkingDirectory` (or
+a `WorkingDirectory` in the `Service` group) has also been set.
+
+This is equivalent to the `--file` option of `podman build`.
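+For instance (the URL and image name below are placeholders, not real resources), a remote Containerfile can be
+combined with an explicit context directory like this:
+
+```
+[Build]
+ImageTag=localhost/remote-example
+# Remote Containerfile; SetWorkingDirectory= must supply the build context
+File=https://example.com/Containerfile
+SetWorkingDirectory=/var/lib/build-context
+```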
+ +### `ForceRM=` + +Always remove intermediate containers after a build, even if the build fails (default true). + +This is equivalent to the `--force-rm` option of `podman build`. + +### `GlobalArgs=` + +This key contains a list of arguments passed directly between `podman` and `build` in the generated +file. It can be used to access Podman features otherwise unsupported by the generator. Since the +generator is unaware of what unexpected interactions can be caused by these arguments, it is not +recommended to use this option. + +The format of this is a space separated list of arguments, which can optionally be individually +escaped to allow inclusion of whitespace and other control characters. + +This key can be listed multiple times. + +### `GroupAdd=` + +Assign additional groups to the primary user running within the container process. Also supports the +`keep-groups` special flag. + +This is equivalent to the `--group-add` option of `podman build`. + +### `ImageTag=` + +Specifies the name which is assigned to the resulting image if the build process completes +successfully. + +This is equivalent to the `--tag` option of `podman build`. + +### `Label=` + +Add an image *label* (e.g. label=*value*) to the image metadata. Can be used multiple times. + +This is equivalent to the `--label` option of `podman build`. + +### `Network=` + +Sets the configuration for network namespaces when handling RUN instructions. This has the same +format as the `--network` option to `podman build`. For example, use `host` to use the host network, +or `none` to not set up networking. + +As a special case, if the `name` of the network ends with `.network`, Quadlet will look for the +corresponding `.network` Quadlet unit. If found, Quadlet will use the name of the Network set in the +Unit, otherwise, `systemd-$name` is used. The generated systemd service contains a dependency on the +service unit generated for that `.network` unit, or on `$name-network.service` if the `.network` +unit is not found. + +This key can be listed multiple times. + +### `PodmanArgs=` + +This key contains a list of arguments passed directly to the end of the `podman build` command +in the generated file (right before the image name in the command line). It can be used to +access Podman features otherwise unsupported by the generator. Since the generator is unaware +of what unexpected interactions can be caused by these arguments, it is not recommended to use +this option. + +The format of this is a space separated list of arguments, which can optionally be individually +escaped to allow inclusion of whitespace and other control characters. + +This key can be listed multiple times. + +### `Pull=` + +Set the image pull policy. + +This is equivalent to the `--pull` option of `podman build`. + +### `Secret=` + +Pass secret information used in Containerfile build stages in a safe way. + +This is equivalent to the `--secret` option of `podman build` and generally has the form +`secret[,opt=opt ...]`. + +### `SetWorkingDirectory=` + +Provide context (a working directory) to `podman build`. Supported values are a path, a URL, or the +special keys `file` or `unit` to set the context directory to the parent directory of the file from +the `File=` key or to that of the Quadlet `.build` unit file, respectively. This allows Quadlet to +resolve relative paths. + +When using one of the special keys (`file` or `unit`), the `WorkingDirectory` field of the `Service` +group of the Systemd service unit will also be set to accordingly. 
Alternatively, users can
+explicitly set the `WorkingDirectory` field of the `Service` group in the `.build` file. Please note
+that if the `WorkingDirectory` field of the `Service` group is set by the user, Quadlet will not
+overwrite it even if `SetWorkingDirectory` is set to `file` or `unit`.
+
+By providing a URL to `SetWorkingDirectory=`, you can instruct `podman build` to clone a Git
+repository, or to download and extract an archive file, into a temporary location that is then used
+as the build context. Note that in this case, the `WorkingDirectory` of the Systemd service unit is left
+untouched by Quadlet.
+
+Note that providing a context directory is mandatory for a `.build` file, unless a `File=` key has
+also been provided.
+
+### `Target=`
+
+Set the target build stage to build. Commands in the Containerfile after the target stage are
+skipped.
+
+This is equivalent to the `--target` option of `podman build`.
+
+### `TLSVerify=`
+
+Require HTTPS and verification of certificates when contacting registries.
+
+This is equivalent to the `--tls-verify` option of `podman build`.
+
+### `Variant=`
+
+Override the default architecture variant of the container image to be built.
+
+This is equivalent to the `--variant` option of `podman build`.
+
+### `Volume=`
+
+Mount a volume to containers when executing RUN instructions during the build. This is equivalent to
+the `--volume` option of `podman build`, and generally has the form
+`[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]`.
+
+If `SOURCE-VOLUME` starts with `.`, Quadlet resolves the path relative to the location of the unit file.
+
+As a special case, if `SOURCE-VOLUME` ends with `.volume`, Quadlet will look for the corresponding
+`.volume` Quadlet unit. If found, Quadlet will use the name of the Volume set in the Unit,
+otherwise, `systemd-$name` is used. The generated systemd service contains a dependency on the
+service unit generated for that `.volume` unit, or on `$name-volume.service` if the `.volume` unit
+is not found.
+
+This key can be listed multiple times.
+
 ## Image units [Image]

 Image files are named with a `.image` extension and contain a section `[Image]` describing the
@@ -1321,6 +1597,11 @@ exists on the host, pulling it if needed.

 Using image units allows containers and volumes to depend on images being automatically pulled. This
 is particularly interesting when using special options to control image pulls.

+Note: The generated service has a dependency on `network-online.target`, ensuring the network is reachable if
+an image needs to be pulled.
+If the image service needs to run without an available network (e.g. early in boot), the requirement can be
+overridden simply by adding an empty `After=` in the unit file. This unsets all previously set `After=` dependencies.
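+As a sketch of that override (the image name below is only an example), an `.image` unit that must not wait for
+the network could carry:
+
+```
+[Unit]
+# Drop the default ordering after network-online.target
+After=
+
+[Image]
+Image=quay.io/example/baseimage:latest
+```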
+ Valid options for `[Image]` are listed below: | **[Image] options** | **podman image pull equivalent** | @@ -1493,6 +1774,26 @@ Yaml=/opt/k8s/deployment.yml WantedBy=multi-user.target default.target ``` +Example for locally built image to be used in a container: + +`test.build` +``` +[Build] +# Tag the image to be built +ImageTag=localhost/imagename + +# Set the working directory to the path of the unit file, +# expecting to find a Containerfile/Dockerfile +# + other files needed to build the image +SetWorkingDirectory=unit +``` + +`test.container` +``` +[Container] +Image=test.build +``` + Example `test.volume`: ``` @@ -1527,6 +1828,32 @@ Exec=sh -c "sleep inf" Pod=test.pod ``` +Example `s3fs.volume`: + +For further details, please see the [s3fs-fuse](https://github.com/s3fs-fuse/s3fs-fuse) project. +Remember to read the [FAQ](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ) + +> NOTE: Enabling the cache massively speeds up access and write times on static files/objects. + +> However, `use_cache` is [UNBOUNDED](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ#q-how-does-the-local-file-cache-work)! + +> Be careful, it will fill up with any files accessed on the s3 bucket through the file system. + +Please remember to set `S3_BUCKET`, `PATH`, `AWS_REGION`. `CACHE_DIRECTORY` should be set up by [systemd](https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#RuntimeDirectory=) + +``` +[Service] +CacheDirectory=s3fs +ExecStartPre=/usr/local/bin/aws s3api put-object --bucket ${S3_BUCKET} --key ${PATH}/ + +[Volume] +Device=${S3_BUCKET}:/${PATH} +Type=fuse.s3fs +VolumeName=s3fs-volume +Options=iam_role,endpoint=${AWS_REGION},use_xattr,listobjectsv2,del_cache,use_cache=${CACHE_DIRECTORY} +# `iam_role` assumes inside EC2, if not, Use `profile=` instead +``` + ## SEE ALSO **[systemd.unit(5)](https://www.freedesktop.org/software/systemd/man/systemd.unit.html)**, **[systemd.service(5)](https://www.freedesktop.org/software/systemd/man/systemd.service.html)**, diff --git a/docs/source/markdown/podman-unmount.1.md.in b/docs/source/markdown/podman-unmount.1.md.in index 8e09d6f58f..8b8653c7f8 100644 --- a/docs/source/markdown/podman-unmount.1.md.in +++ b/docs/source/markdown/podman-unmount.1.md.in @@ -42,17 +42,17 @@ Note: Other processes can fail if the mount point they are using is removed with ## EXAMPLE -Unmount container with a given ID +Unmount container with a given ID: ``` podman container unmount containerID ``` -Unmount multiple containers with given IDs +Unmount multiple containers with given IDs: ``` podman unmount containerID1 containerID2 containerID3 ``` -Unmount all containers +Unmount all containers: ``` podman unmount --all ``` diff --git a/docs/source/markdown/podman-update.1.md.in b/docs/source/markdown/podman-update.1.md.in index 913f6a7180..9cce804aa9 100644 --- a/docs/source/markdown/podman-update.1.md.in +++ b/docs/source/markdown/podman-update.1.md.in @@ -1,7 +1,7 @@ % podman-update 1 ## NAME -podman\-update - Update the cgroup configuration of a given container +podman\-update - Update the configuration of a given container ## SYNOPSIS **podman update** [*options*] *container* @@ -10,10 +10,8 @@ podman\-update - Update the cgroup configuration of a given container ## DESCRIPTION -Updates the cgroup configuration of an already existing container. The currently supported options are a subset of the -podman create/run resource limits options. 
These new options are non-persistent and only last for the current execution of the container; the configuration is honored on its next run. -This means that this command can only be executed on an already running container and the changes made is erased the next time the container is stopped and restarted, this is to ensure immutability. -This command takes one argument, a container name or ID, alongside the resource flags to modify the cgroup. +Updates the configuration of an already existing container, allowing different resource limits to be set. +The currently supported options are a subset of the podman create/run resource limit options. ## OPTIONS @@ -55,6 +53,8 @@ This command takes one argument, a container name or ID, alongside the resource @@option pids-limit +@@option restart + ## EXAMPLEs diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md index ca6e400909..67b97aa7b6 100644 --- a/docs/source/markdown/podman-volume-create.1.md +++ b/docs/source/markdown/podman-volume-create.1.md @@ -103,7 +103,7 @@ Create image named volume using the specified local image in containers/storage. ## QUOTAS -podman volume create uses `XFS project quota controls` for controlling the size and the number of inodes of builtin volumes. The directory used to store the volumes must be an `XFS` file system and be mounted with the `pquota` option. +`podman volume create` uses `XFS project quota controls` for controlling the size and the number of inodes of builtin volumes. The directory used to store the volumes must be an `XFS` file system and be mounted with the `pquota` option. Example /etc/fstab entry: ``` @@ -129,6 +129,77 @@ All containers are assigned larger project IDs (e.g. >= 100000). All volume assigned project IDs larger project IDs starting with 200000. This prevents xfs_quota management conflicts with containers/storage. +## MOUNT EXAMPLES + +`podman volume create` allows the `type`, `device`, and `o` options to be passed to `mount(8)` when using the `local` driver. + +## [s3fs-fuse](https://github.com/s3fs-fuse/s3fs-fuse) + +[s3fs-fuse](https://github.com/s3fs-fuse/s3fs-fuse) or just `s3fs`, is a [fuse](https://github.com/libfuse/libfuse) filesystem that allows s3 prefixes to be mounted as filesystem mounts. + +**Installing:** +```shell +$ doas dnf install s3fs-fuse +``` + +**Simple usage:** +```shell +$ s3fs --help +$ s3fs -o use_xattr,endpoint=aq-central-1 bucket:/prefix /mnt +``` + +**Equivalent through `mount(8)`** +```shell +$ mount -t fuse.s3fs -o use_xattr,endpoint=aq-central-1 bucket:/prefix /mnt +``` + +**Equivalent through `podman volume create`** +```shell +$ podman volume create s3fs-fuse-volume -o type=fuse.s3fs -o device=bucket:/prefix -o o=use_xattr,endpoint=aq-central-1 +``` + +**The volume can then be mounted in a container with** +```shell +$ podman run -v s3fs-fuse-volume:/s3:z --rm -it fedora:latest +``` + +Please see the available [options](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Fuse-Over-Amazon#options) on their wiki. + +### Using with other container users + +The above example works because the volume is mounted as the host user and inside the container `root` is mapped to the user in the host. + +If the mount is accessed by a different user inside the container, a "Permission denied" error will be raised. 
+
+```shell
+$ podman run --user bin:bin -v s3fs-fuse-volume:/s3:z,U --rm -it fedora:latest
+$ ls /s3
+# ls: /s3: Permission denied
+```
+
+In FUSE-land, mounts are protected for the user who mounted them; specify the `allow_other` mount option if other users need access.
+> Note: This removes the normal FUSE [security measures](https://github.com/libfuse/libfuse/wiki/FAQ#why-dont-other-users-have-access-to-the-mounted-filesystem) on the mount point; after that, the normal filesystem permissions have to protect it.
+
+```shell
+$ podman volume create s3fs-fuse-other-volume -o type=fuse.s3fs -o device=bucket:/prefix -o o=allow_other,use_xattr,endpoint=aq-central-1
+$ podman run --user bin:bin -v s3fs-fuse-other-volume:/s3:z,U --rm -it fedora:latest
+$ ls /s3
+```
+
+### The Prefix must exist
+
+`s3fs` will fail to mount if the prefix does not exist in the bucket.
+
+Create an S3 directory by putting an empty object at the desired `prefix/` key:
+```shell
+$ aws s3api put-object --bucket bucket --key prefix/
+```
+
+If performance is the priority, please check out the more performant [goofys](https://github.com/kahing/goofys).
+
+> FUSE filesystems exist for [Google Cloud Storage](https://github.com/GoogleCloudPlatform/gcsfuse) and [Azure Blob Storage](https://github.com/Azure/azure-storage-fuse)
+
+
 ## SEE ALSO
 **[podman(1)](podman.1.md)**, **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)**, **[podman-volume(1)](podman-volume.1.md)**, **mount(8)**, **xfs_quota(8)**, **xfs_quota(8)**, **projects(5)**, **projid(5)**
diff --git a/docs/source/markdown/podman-volume-inspect.1.md b/docs/source/markdown/podman-volume-inspect.1.md
index 7e7e831feb..ed1a109d05 100644
--- a/docs/source/markdown/podman-volume-inspect.1.md
+++ b/docs/source/markdown/podman-volume-inspect.1.md
@@ -26,25 +26,25 @@ Format volume output using Go template
 
 Valid placeholders for the Go template are listed below:
 
-| **Placeholder** | **Description** |
-| ------------------- | ------------------------------------------------------ |
-| .Anonymous | Indicates whether volume is anonymous |
-| .CreatedAt ... | Volume creation time |
-| .Driver | Volume driver |
-| .GID | GID the volume was created with |
-| .Labels ... | Label information associated with the volume |
-| .LockNumber | Number of the volume's Libpod lock |
-| .MountCount | Number of times the volume is mounted |
-| .Mountpoint | Source of volume mount point |
-| .Name | Volume name |
-| .NeedsChown | Indicates volume needs to be chowned on first use |
-| .NeedsCopyUp | Indicates volume needs dest data copied up on first use|
-| .Options ... | Volume options |
-| .Scope | Volume scope |
-| .Status ... | Status of the volume |
-| .StorageID | StorageID of the volume |
-| .Timeout | Timeout of the volume |
-| .UID | UID the volume was created with |
+| **Placeholder** | **Description** |
+| ------------------- | --------------------------------------------------------------------------- |
+| .Anonymous | Indicates whether volume is anonymous |
+| .CreatedAt ... | Volume creation time |
+| .Driver | Volume driver |
+| .GID | GID the volume was created with |
+| .Labels ...
| Label information associated with the volume | +| .LockNumber | Number of the volume's Libpod lock | +| .MountCount | Number of times the volume is mounted | +| .Mountpoint | Source of volume mount point | +| .Name | Volume name | +| .NeedsChown | Indicates volume will be chowned on next use | +| .NeedsCopyUp | Indicates data at the destination will be copied into the volume on next use| +| .Options ... | Volume options | +| .Scope | Volume scope | +| .Status ... | Status of the volume | +| .StorageID | StorageID of the volume | +| .Timeout | Timeout of the volume | +| .UID | UID the volume was created with | #### **--help** diff --git a/docs/source/markdown/podman-volume-mount.1.md b/docs/source/markdown/podman-volume-mount.1.md index 96268515cf..ce32581439 100644 --- a/docs/source/markdown/podman-volume-mount.1.md +++ b/docs/source/markdown/podman-volume-mount.1.md @@ -19,8 +19,16 @@ returned. ## EXAMPLE +Mount specified volume. As Root: ``` -podman volume mount foo +# podman volume mount foo +/home/dwalsh/.local/share/containers/storage/volumes/foo/_data +``` + +In rootless mode, volume mounting only works after executing the podman unshare command to enter the user namespace. +``` +$ podman unshare +# podman volume mount foo /home/dwalsh/.local/share/containers/storage/volumes/foo/_data ``` diff --git a/docs/source/markdown/podman-volume-unmount.1.md b/docs/source/markdown/podman-volume-unmount.1.md index 3c46348355..2f07ca1e0f 100644 --- a/docs/source/markdown/podman-volume-unmount.1.md +++ b/docs/source/markdown/podman-volume-unmount.1.md @@ -19,12 +19,12 @@ counter reaches zero indicating no other processes are using the mount. ## EXAMPLE -Unmount volume with a given ID +Unmount volume with a given ID: ``` podman volume unmount volumeID ``` -Unmount multiple volumes with given IDs +Unmount multiple volumes with given IDs: ``` podman volume unmount volumeID1 volumeID2 volumeID3 ``` diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md index b94afd9687..f03689cc00 100644 --- a/docs/source/markdown/podman.1.md +++ b/docs/source/markdown/podman.1.md @@ -384,7 +384,7 @@ the exit codes follow the `chroot` standard, see below: | [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. | | [podman-unshare(1)](podman-unshare.1.md) | Run a command inside of a modified user namespace. | | [podman-untag(1)](podman-untag.1.md) | Remove one or more names from a locally-stored image. | -| [podman-update(1)](podman-update.1.md) | Update the cgroup configuration of a given container. | +| [podman-update(1)](podman-update.1.md) | Update the configuration of a given container. | | [podman-version(1)](podman-version.1.md) | Display the Podman version information. | | [podman-volume(1)](podman-volume.1.md) | Simple management tool for volumes. | | [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. 
| diff --git a/docs/source/volume.rst b/docs/source/volume.rst deleted file mode 100644 index af81f39bc1..0000000000 --- a/docs/source/volume.rst +++ /dev/null @@ -1,17 +0,0 @@ -Volume -====== -:doc:`create ` Create a new volume - -:doc:`exists ` Check if the given volume exists - -:doc:`export ` Exports volume to external tar - -:doc:`import ` Import tarball contents into a podman volume - -:doc:`inspect ` Display detailed information on one or more volumes - -:doc:`ls ` List volumes - -:doc:`prune ` Remove all unused volumes - -:doc:`rm ` Remove one or more volumes diff --git a/docs/tutorials/performance.md b/docs/tutorials/performance.md index 62258be3df..92e07d12ac 100644 --- a/docs/tutorials/performance.md +++ b/docs/tutorials/performance.md @@ -83,7 +83,9 @@ because different UID/GID mappings could potentially be used on each invocation. __--userns__, __--uidmap__ and __--gidmap__ the performance penalty is a one-time cost that only occurs the first time the command is run. -Using native overlayfs as an unprivileged user is only available for Podman version >= 3.1 on a Linux kernel version >= 5.12. +Using native overlayfs as an unprivileged user is available for Podman version >= 3.1 on a Linux kernel version >= 5.13. +If SELinux is not used, then Linux kernel version 5.11 or later is sufficient. +Native overlayfs support is included in RHEL >= 8.5, see [release notes](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/8.5_release_notes/index). To show the current storage driver @@ -142,14 +144,13 @@ See storage.conf(5) for all available configuration settings. ### Network performance for rootless Podman -When using rootless Podman, network traffic is normally passed through -[slirp4netns](https://github.com/containers/podman/blob/main/docs/tutorials/basic_networking.md#slirp4netns). -This comes with a performance penalty. +When using rootless Podman, network traffic is normally passed through the network driver +[pasta](https://passt.top/passt/about/#pasta). This comes with a performance penalty. -You can avoid using slirp4netns in the following ways: +You can avoid using _pasta_ in the following ways: * Use socket activation for listening network sockets. Communication over the activated socket does not pass through - slirp4netns, so it has the same performance characteristics as the normal network on the host. + pasta, so it has the same performance characteristics as the normal network on the host. Socket-activated services can be started and stopped in different ways: + Let systemd start the service when the first client connects. Let the service terminate by itself after some time of inactivity. Using a service on demand, can free up compute resources. @@ -158,13 +159,28 @@ You can avoid using slirp4netns in the following ways: The [socket activation tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/socket_activation.md) provides more information about socket activation support in Podman. -* Use the network driver [_pasta_](https://passt.top/passt/about/#pasta). Pasta is under development and currently needs a patched Podman to run. - -* Set up the network manually as root. Create a bridge and virtual ethernet pair (VETH). See the [example](https://lists.podman.io/archives/list/podman@lists.podman.io/thread/W6MCYO6RY5YFRTSUDAOEZA7SC2EFXRZE/) posted on the Podman mailing list. 
See also the section _DIY networking_ in [Podman-Rootless-Networking.pdf](https://podman.io/community/meeting/notes/2021-10-05/Podman-Rootless-Networking.pdf). +* Set up the network manually as root. Create a bridge and virtual ethernet pair (VETH). Note: compared to other methods, + this setup doesn't provide any network isolation. In containers granted CAP_NET_ADMIN or CAP_NET_RAW, processes can + open packet or raw sockets directly facing the host, which allows them to send arbitrary frames, including + crafted Ethernet and IP packets, as well as receiving packets that were not originally intended for the container, + by means of ARP spoofing. + For more information, see + + An [example](https://lists.podman.io/archives/list/podman@lists.podman.io/thread/W6MCYO6RY5YFRTSUDAOEZA7SC2EFXRZE/) posted on the Podman mailing list + + The section _DIY networking_ in [Podman-Rootless-Networking.pdf](https://containers.github.io/podman.io_old/old/community/meeting/notes/2021-10-05/Podman-Rootless-Networking.pdf) * Use `--network=host`. No network namespace is created. The container will use the host’s network. Note: By using `--network=host`, the container is given full access to local system services such as D-bus and is therefore considered insecure. +Side note: Pasta is faster than the network driver [slirp4netns](https://github.com/containers/podman/blob/main/docs/tutorials/basic_networking.md#slirp4netns). +Pasta is the default network driver since Podman 5.0.0. + +Since Podman 5.1.0 the default network driver can be shown with + +``` +$ podman info -f '{{.Host.RootlessNetworkCmd}}' +pasta +``` + ### Lazy pulling of container images Podman supports lazy pulling for the following container image formats: diff --git a/docs/tutorials/podman-for-windows.md b/docs/tutorials/podman-for-windows.md index 34489c271e..00e0994f75 100644 --- a/docs/tutorials/podman-for-windows.md +++ b/docs/tutorials/podman-for-windows.md @@ -397,7 +397,7 @@ Recovering from a failed auto-installation of WSL If auto-install fails and retrying is unsuccessful, you can attempt to reset your WSL system state and perform a manual WSL installation using the `wsl ---install command`. To do so, perform the following steps: +--install` command. To do so, perform the following steps: 1. 
Launch PowerShell as administrator ``` diff --git a/go.mod b/go.mod index cde495c397..44cad31b82 100644 --- a/go.mod +++ b/go.mod @@ -1,33 +1,36 @@ module github.com/containers/podman/v5 -go 1.20 +go 1.21 + +// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates +toolchain go1.21.0 require ( - github.com/BurntSushi/toml v1.3.2 - github.com/Microsoft/go-winio v0.6.1 + github.com/BurntSushi/toml v1.4.0 + github.com/Microsoft/go-winio v0.6.2 github.com/blang/semver/v4 v4.0.0 github.com/buger/goterm v1.0.4 - github.com/checkpoint-restore/checkpointctl v1.1.0 - github.com/checkpoint-restore/go-criu/v7 v7.0.0 - github.com/containernetworking/plugins v1.4.0 - github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c - github.com/containers/common v0.57.1-0.20240207210145-1eeaf97594e9 + github.com/checkpoint-restore/checkpointctl v1.2.1 + github.com/checkpoint-restore/go-criu/v7 v7.1.0 + github.com/containernetworking/plugins v1.5.1 + github.com/containers/buildah v1.36.0 + github.com/containers/common v0.59.1-0.20240603155017-49ad520556e7 github.com/containers/conmon v2.0.20+incompatible - github.com/containers/gvisor-tap-vsock v0.7.3 - github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc - github.com/containers/libhvee v0.6.1-0.20240205152934-3a16bce3e4be - github.com/containers/ocicrypt v1.1.9 + github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70 + github.com/containers/image/v5 v5.31.1-0.20240603155732-aa935041e316 + github.com/containers/libhvee v0.7.1 + github.com/containers/ocicrypt v1.2.0 github.com/containers/psgo v1.9.0 - github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565 + github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51 github.com/containers/winquit v1.1.0 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 github.com/coreos/stream-metadata-go v0.4.4 - github.com/crc-org/crc/v2 v2.32.0 - github.com/crc-org/vfkit v0.5.0 - github.com/cyphar/filepath-securejoin v0.2.4 + github.com/crc-org/crc/v2 v2.38.0 + github.com/crc-org/vfkit v0.5.1 + github.com/cyphar/filepath-securejoin v0.2.5 github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e github.com/docker/distribution v2.8.3+incompatible - github.com/docker/docker v25.0.3+incompatible + github.com/docker/docker v26.1.4+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/docker/go-units v0.5.0 @@ -37,136 +40,136 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/handlers v1.5.2 github.com/gorilla/mux v1.8.1 - github.com/gorilla/schema v1.2.1 + github.com/gorilla/schema v1.4.1 github.com/hashicorp/go-multierror v1.1.1 github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921 github.com/json-iterator/go v1.1.12 + github.com/klauspost/pgzip v1.2.6 github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 github.com/mattn/go-shellwords v1.0.12 - github.com/mattn/go-sqlite3 v1.14.21 + github.com/mattn/go-sqlite3 v1.14.22 github.com/mdlayher/vsock v1.2.1 github.com/moby/sys/user v0.1.0 github.com/moby/term v0.5.0 github.com/nxadm/tail v1.4.11 - github.com/onsi/ginkgo/v2 v2.15.0 - github.com/onsi/gomega v1.31.1 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 github.com/opencontainers/runc v1.1.12 - github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 + 
github.com/opencontainers/runtime-spec v1.2.0 github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc github.com/opencontainers/selinux v1.11.0 - github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510 - github.com/rootless-containers/rootlesskit v1.1.1 - github.com/shirou/gopsutil/v3 v3.23.12 + github.com/openshift/imagebuilder v1.2.11 + github.com/rootless-containers/rootlesskit/v2 v2.1.0 + github.com/shirou/gopsutil/v3 v3.24.5 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - github.com/ulikunitz/xz v0.5.11 - github.com/vbauerster/mpb/v8 v8.7.2 + github.com/vbauerster/mpb/v8 v8.7.3 github.com/vishvananda/netlink v1.2.1-beta.2 - go.etcd.io/bbolt v1.3.8 - golang.org/x/exp v0.0.0-20231226003508-02704c960a9b - golang.org/x/net v0.20.0 - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 - golang.org/x/term v0.17.0 - golang.org/x/text v0.14.0 - google.golang.org/protobuf v1.32.0 + go.etcd.io/bbolt v1.3.10 + golang.org/x/crypto v0.24.0 + golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc + golang.org/x/net v0.26.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 + golang.org/x/text v0.16.0 + google.golang.org/protobuf v1.34.2 gopkg.in/inf.v0 v0.9.1 - gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/kubernetes v1.28.4 sigs.k8s.io/yaml v1.4.0 - tags.cncf.io/container-device-interface v0.6.2 + tags.cncf.io/container-device-interface v0.7.2 ) require ( dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect + github.com/Microsoft/hcsshim v0.12.4 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/bytedance/sonic v1.10.1 // indirect + github.com/bytedance/sonic v1.10.2 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.0 // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect - github.com/containerd/containerd v1.7.13 // indirect + github.com/containerd/containerd v1.7.17 // indirect + github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/containernetworking/cni v1.1.2 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect - github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 // indirect - github.com/coreos/go-oidc/v3 v3.9.0 // indirect + github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee // indirect + github.com/coreos/go-oidc/v3 v3.10.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/digitalocean/go-libvirt 
v0.0.0-20220804181439-8648fbde413e // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect - github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker-credential-helpers v0.8.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fsouza/go-dockerclient v1.10.1 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/fsouza/go-dockerclient v1.11.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/gin-gonic/gin v1.9.1 // indirect - github.com/go-jose/go-jose/v3 v3.0.1 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.2 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.21.0 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/runtime v0.26.0 // indirect - github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/strfmt v0.22.0 // indirect - github.com/go-openapi/swag v0.22.9 // indirect - github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.15.5 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-playground/validator/v10 v10.17.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.17.0 // indirect + github.com/google/go-containerregistry v0.19.1 // indirect github.com/google/go-intervals v0.0.2 // indirect - github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.5 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.17.5 // indirect - 
github.com/klauspost/cpuid/v2 v2.2.6 // indirect - github.com/klauspost/pgzip v1.2.6 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/kr/pretty v0.3.1 // indirect github.com/leodido/go-urn v1.2.4 // indirect - github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect + github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/buildkit v0.12.5 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/moby/sys/sequential v0.5.0 // indirect @@ -176,54 +179,51 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.6 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/proglottis/gpgme v0.1.3 // indirect - github.com/rivo/uniseg v0.4.4 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/sigstore/fulcio v1.4.3 // indirect - github.com/sigstore/rekor v1.2.2 // indirect - github.com/sigstore/sigstore v1.8.1 // indirect + github.com/sigstore/fulcio v1.4.5 // indirect + github.com/sigstore/rekor v1.3.6 // indirect + github.com/sigstore/sigstore v1.8.4 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect - github.com/sylabs/sif/v2 v2.15.1 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect + github.com/sylabs/sif/v2 v2.16.0 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vishvananda/netns v0.0.4 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.mongodb.org/mongo-driver v1.13.1 // indirect + 
github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/arch v0.5.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.17.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/grpc v1.60.1 // indirect - gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/grpc v1.64.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect + tags.cncf.io/container-device-interface/specs-go v0.7.0 // indirect ) replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0 diff --git a/go.sum b/go.sum index 627736f515..2f0d4f91b1 100644 --- a/go.sum +++ b/go.sum @@ -2,50 +2,54 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk= -github.com/Microsoft/hcsshim 
v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.4 h1:Ev7YUMHAHoWNm+aDSPzc5W9s6E2jyL1szpVDJeZ/Rr4= +github.com/Microsoft/hcsshim v0.12.4/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs= github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= -github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc= -github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= +github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/checkpoint-restore/checkpointctl v1.1.0 h1:plS/2zBzbAXO6DH/H+TqD7ZGhz8iQVb+NLgsOJSTWaw= 
-github.com/checkpoint-restore/checkpointctl v1.1.0/go.mod h1:DtPd9M4bt/jdt+7DodFxm0lrzdevabk3cbni/FL4BY0= -github.com/checkpoint-restore/go-criu/v7 v7.0.0 h1:R4UF/njKOuq8ooG7naFGsCeKsjv5j+rIhgFgSSeC2KY= -github.com/checkpoint-restore/go-criu/v7 v7.0.0/go.mod h1:xD1v3cPww1QYpJR3+XTTdC8hYubPnptIPsT1daXhbr4= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/checkpointctl v1.2.1 h1:aYFl2CEk95bPLDvNDgif4ZLx3pjCZMJm6td+A0X1+xs= +github.com/checkpoint-restore/checkpointctl v1.2.1/go.mod h1:8oF+AtNUFJAI13ETcbB3clnjiwvviX0QzVBhYzQ8yBA= +github.com/checkpoint-restore/go-criu/v7 v7.1.0 h1:JbQyO4o+P8ycNTMLPiiDqXg49bAcy4WljWCzYQho35A= +github.com/checkpoint-restore/go-criu/v7 v7.1.0/go.mod h1:1svAtmbtvX4BKI45OFzgoTTLG7oYFKdColv/Vcsb2A8= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -59,8 +63,10 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is= -github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4= +github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A= +github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= @@ -69,51 +75,51 @@ github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9Fqctt github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/plugins v1.4.0 
h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic= -github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0= -github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c h1:r+1vFyTAoXptJrsPsnOMI3G0jm4+BCfXAcIyuA33lzo= -github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c/go.mod h1:Hw4qo2URFpWvZ2tjLstoQMpNC6+gR4PtxQefvV/UKaA= -github.com/containers/common v0.57.1-0.20240207210145-1eeaf97594e9 h1:JFkj+j5hSOJdCpLhpx+xp1pEbMfXj2xtorRx223PqYo= -github.com/containers/common v0.57.1-0.20240207210145-1eeaf97594e9/go.mod h1:0NtD59teSfqhgJqcgg73on5AuaGo8XbbVLv+i2bl0oY= +github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= +github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= +github.com/containers/buildah v1.36.0 h1:e369nE9bx0yJtPVRDMsbr0OzkW59XCYAl+5poGhFjcs= +github.com/containers/buildah v1.36.0/go.mod h1:qlEF4RuCnzEUTQhAnCyGr5WoYNZaU0k2mPcZscUR//c= +github.com/containers/common v0.59.1-0.20240603155017-49ad520556e7 h1:Vp0npRNqZJrtMrOeVPyLNDYojSPbkNm3pQVnuBULubs= +github.com/containers/common v0.59.1-0.20240603155017-49ad520556e7/go.mod h1:G4vF3V1iWu+NxT/pquuJYBcWGsrVKibDhPu9h52nXyI= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/gvisor-tap-vsock v0.7.3 h1:yORnf15sP+sLFhxLNLgmB5/lOhldn9dRMHx/tmYtSOQ= -github.com/containers/gvisor-tap-vsock v0.7.3/go.mod h1:NI1fLMtKXQZoDrrOeqryGz7x7j/XSFWRmQILva7Fu9c= -github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc h1:3I5+mrrG7Fuv4aA13t1hAMQcjN3rTAQInfbxa5P+XH4= -github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc/go.mod h1:oMMRA6avp1Na54lVPCj/OvcfXDMLlzfy3H7xeRiWmmI= -github.com/containers/libhvee v0.6.1-0.20240205152934-3a16bce3e4be h1:M0lI66eh3tYtvfcxy78dMbhKuYVP8aE0oLDoS5nDPq0= -github.com/containers/libhvee v0.6.1-0.20240205152934-3a16bce3e4be/go.mod h1:IMG6nPEIBqC3FvxV//mCTRKo12gvY0NqSjRIKQoMaKY= +github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70 h1:aACcXSIgcuPq5QdNZZ8B53BCdhqYvw33/8QmZWJATvg= +github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70/go.mod h1:v2JP4sZFltFJ8smHLVm12Ng3jHetrNh565ZwWpB5pzs= +github.com/containers/image/v5 v5.31.1-0.20240603155732-aa935041e316 h1:WMekH3CnJOgVwJmvyg1Ucyt5In7BQx2k0mM+FHixg+I= +github.com/containers/image/v5 v5.31.1-0.20240603155732-aa935041e316/go.mod h1:2oAksrXTiV/ArGnq3RlKHK8+6Wsde5jt4qWHfU7hHxI= +github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4= +github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 h1:0p58QJRICjkRVCDix1nsnyrtJ3Qj4CWcGd1bOEY9sVY= -github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8/go.mod h1:oMhW1fWXz1FGN97rhycbuAwrkXXV1z5c/Bjbn0CSlFY= -github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM= -github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys= +github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee 
h1:QU6XNrPcxyGejcEYJfpIH7LwB+yXVbb0tWxf7mZxfN4= +github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee/go.mod h1:cEhy3LVQzQqf/BHx0WS6CXmZp+RZZaUKmhQaFZ4NiiU= +github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= +github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A= -github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565 h1:Gcirfx2DNoayB/+ypLgl5+ABzIPPDAoncs1qgZHHQHE= -github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565/go.mod h1:2E/QBqWVcJXwumP7nVUrampwRNL4XKjHL/aQya7ZdhI= +github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51 h1:0ipwtt1iNX4gSje0iQHHtnvqnU45uUyGO1LVGBkpoSE= +github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51/go.mod h1:y1CGloHDYq9uK3Og/zLkrJ8vpSuFwNaIWOyB8IX076w= github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE= github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= +github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= +github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc= github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09/go.mod h1:m2r/smMKsKwgMSAoFKHaa68ImdCSNuKE1MxvQ64xuCQ= github.com/coreos/stream-metadata-go v0.4.4 h1:PM/6iNhofKGydsatiY1zdnMMHBT34skb5P7nfEFR4GU= github.com/coreos/stream-metadata-go v0.4.4/go.mod h1:fMObQqQm8Ku91G04btKzEH3AsdP1mrAb986z9aaK0tE= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crc-org/crc/v2 v2.32.0 h1:I/62j5KrID8ua1vgAUPOVTtzhcsCsHWdqqiIRHySLfQ= -github.com/crc-org/crc/v2 v2.32.0/go.mod h1:Q2XJM3KkR/Gu+tBjeN77pk5P8DWYKdbxCSf+9l9MYcs= -github.com/crc-org/vfkit v0.5.0 h1:co7N/3h5Jl29VfhPIvbF2cSG2bC7vC4DxbBVeppGPY0= -github.com/crc-org/vfkit v0.5.0/go.mod h1:OQiqOghCzdgkd/jRoVu4/lcfQSKje7XPVpfW1aO9YvE= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crc-org/crc/v2 v2.38.0 h1:8QcoH4hYksfKUQOLXHZw4jIY1aNS162WraZR4mzQzC8= +github.com/crc-org/crc/v2 v2.38.0/go.mod h1:HPY6grOaExM4Bhmd3T4RSkP0eizWsb8wSgoSwNsV5+k= +github.com/crc-org/vfkit v0.5.1 h1:r1zNf1g1bLbgu5BgIQodirvYaIGWJQ91eS/PIgNO6lo= +github.com/crc-org/vfkit v0.5.1/go.mod h1:Hqi20zQcqXMk6JqvByvOidHYv+KzPx3G+cjkdGSWv60= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod 
h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -124,28 +130,32 @@ github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e h1:x5PInTuXLd github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e/go.mod h1:K4+o74YGNjOb9N6yyG+LPj1NjHtk+Qz0IYQPvirbaLs= github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8= github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc= +github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= +github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg= github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -153,99 +163,61 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fsouza/go-dockerclient v1.10.1 h1:bSU5Wu2ARdub+iv9VtoDsN8yBUI0vgflmshbeQLKhvc= -github.com/fsouza/go-dockerclient v1.10.1/go.mod h1:dyzGriw6v3pK4O4O1u/X+vXxDDsrnLLkCqYkcLsDq2k= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/fsouza/go-dockerclient v1.11.0 h1:4ZAk6W7rPAtPXm7198EFqA5S68rwnNQORxlOA5OurCA= +github.com/fsouza/go-dockerclient v1.11.0/go.mod h1:0I3TQCRseuPTzqlY4Y3ajfsg2VAdMQoazrkxJTiJg8s= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= +github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= -github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= -github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag 
v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= -github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= +github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-rod/rod v0.116.0 h1:ypRryjTys3EnqHskJ/TdgodFMvXV0EHvmy4bSkKZgHM= +github.com/go-rod/rod v0.116.0/go.mod 
h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -270,35 +242,33 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw 
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk= -github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= +github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -307,96 +277,85 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/schema v1.2.1 h1:tjDxcmdb+siIqkTNoV+qRH2mjYdr2hHe5MKXbp61ziM= -github.com/gorilla/schema v1.2.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= +github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= +github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921 h1:cfYGdNpXGZobTSSDFB+wx2FRfWptM7sCkScJgVx0Tkk= github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921/go.mod h1:nMr69J6AmirlSvzeVLK7gj4DUY1oYtSwcSiSJ7BBb0A= github.com/hugelgupf/socketpair v0.0.0-20230822150718-707395b1939a h1:Nq7wDsqsVBUBfGn8yB1M028ShWTKTtZBcafaTJ35N0s= +github.com/hugelgupf/socketpair v0.0.0-20230822150718-707395b1939a/go.mod h1:71Bqb5Fh9zPHF8jwdmMEmJObzr25Mx5pWLbDBMMEn6E= github.com/hugelgupf/vmtest v0.0.0-20230810222836-f8c8e381617c h1:4A+BVHylCBQPxlW1NrUITDpRAHCeX6QSZHmzzFQqliU= +github.com/hugelgupf/vmtest 
v0.0.0-20230810222836-f8c8e381617c/go.mod h1:d2FMzS0rIF+3Daufcw660EZfTJihdNPeEwBBJgO4Ap0= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c h1:P/3mFnHCv1A/ej4m8pF5EB6FUt9qEL2Q9lfrcUNwCYs= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E= -github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo= -github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU= +github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I= +github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PNc1GwDRA8+6lBx1TB9UnxDQliCqR73Y= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= 
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.21 h1:IXocQLOykluc3xPE0Lvy8FtggMz1G+U3mEjg+0zGizc= -github.com/mattn/go-sqlite3 v1.14.21/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= @@ -405,12 +364,12 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= @@ -426,10 +385,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 
h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= @@ -440,40 +397,36 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0 h1:NwSQ/5rex97Rum/xZOMjlDQbbZ8YJKOTihf9sxqHxtE= github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0/go.mod h1:tBsQqk9ETVlXxzXjk2Xh/1VjxC/U3Gaq5ps/rC/cadE= -github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 h1:EctkgBjZ1y4q+sibyuuIgiKpa0QSd2elFtSSdNvBVow= -github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc= github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510 h1:ILAESc7vHTVNKctTiR10XC+vACPlR4NbS6570G6QQmY= -github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510/go.mod h1:nOaQJMj7VZgdqATqES4GxZX/p6gwK2r7bpE3Ry63+jM= +github.com/openshift/imagebuilder v1.2.11 h1:4EmEMyiLr7jlskS1h6V6smdcrQSGLRdcIeaXeV3F8EM= +github.com/openshift/imagebuilder v1.2.11/go.mod h1:KkkXOyRjJlZEXWQtHNBNzVHqh4vf/0xX5cDIQ2gr+5I= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ostreedev/ostree-go 
v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= @@ -485,23 +438,25 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= +github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rootless-containers/rootlesskit v1.1.1 h1:F5psKWoWY9/VjZ3ifVcaosjvFZJOagX85U22M0/EQZE= -github.com/rootless-containers/rootlesskit v1.1.1/go.mod h1:UD5GoA3dqKCJrnvnhVgQQnweMF2qZnf9KLw8EewcMZI= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rootless-containers/rootlesskit/v2 v2.1.0 h1:dKqduSlzo5TlTv7tTIoTct2cRUNQf+soqcs+6b1ynvE= +github.com/rootless-containers/rootlesskit/v2 v2.1.0/go.mod h1:W+5NaXv3l2sD4LiPxRWLOlY+p9H0+Ev71zel/zFRnLo= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= @@ -509,38 +464,33 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8 github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= -github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= -github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo= -github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sigstore/fulcio v1.4.5 
h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= +github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= +github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= +github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w= +github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -549,15 +499,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0= -github.com/sylabs/sif/v2 v2.15.1/go.mod 
h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/sylabs/sif/v2 v2.16.0 h1:2eqaBaQQsn5DZTzm3QZm0HupZQEjNXfxRnCmtyCihEU= +github.com/sylabs/sif/v2 v2.16.0/go.mod h1:d5TxgD/mhMUU3kWLmZmWJQ99Wg0asaTP0bq3ezR1xpg= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -567,87 +517,86 @@ github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9f github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/u-root/gobusybox/src v0.0.0-20230806212452-e9366a5b9fdc h1:udgfN9Qy573qgHWMEORFgy6YXNDiN/Fd5LlKdlp+/Mo= +github.com/u-root/gobusybox/src v0.0.0-20230806212452-e9366a5b9fdc/go.mod h1:lYt+LVfZBBwDZ3+PHk4k/c/TnKOkjJXiJO73E32Mmpc= github.com/u-root/u-root v0.11.1-0.20230807200058-f87ad7ccb594 h1:1AIJqOtdEufYfGb3eRpdaqWONzBOpAwrg1fehbWg+Mg= +github.com/u-root/u-root v0.11.1-0.20230807200058-f87ad7ccb594/go.mod h1:PQzg9XJGp6Y1hRmTUruSO7lR7kKR6FpoSObf5n5bTfE= github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg= github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14= -github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY= +github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0= +github.com/vbauerster/mpb/v8 v8.7.3/go.mod h1:9nFlNpDGVoTmQ4QvNjSLtwLmAFjwmq0XaAF26toHGNM= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink 
v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s= +github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM= github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= +github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod 
h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= 
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= -golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4= -golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc h1:O9NuF4s+E/PvMIy+9IUZB9znFwUIXEWSstNjek6VpVg= +golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -655,8 +604,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -670,37 +620,31 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -715,7 +659,6 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -733,65 +676,64 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= 
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -804,17 +746,12 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= -gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -823,34 +760,24 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools 
v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA= -k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA= -k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f h1:pjVeIo9Ba6K1Wy+rlwX91zT7A+xGEmxiNRBdN04gDTQ= -tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= -tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE= -tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ= -tags.cncf.io/container-device-interface/specs-go v0.6.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= +src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f/go.mod h1:kPbhv5+fBeUh85nET3wWhHGUaUQ64nZMJ8FwA5v5Olg= +tags.cncf.io/container-device-interface v0.7.2 h1:MLqGnWfOr1wB7m08ieI4YJ3IoLKKozEnnNYBtacDPQU= +tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto= +tags.cncf.io/container-device-interface/specs-go v0.7.0 h1:w/maMGVeLP6TIQJVYT5pbqTi8SCw/iHZ+n4ignuGHqg= +tags.cncf.io/container-device-interface/specs-go v0.7.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= diff --git a/hack/bats b/hack/bats index ba26e0444a..3002c0199b 100755 --- a/hack/bats +++ b/hack/bats @@ -64,9 +64,6 @@ Examples: export PODMAN=${PODMAN:-$(pwd)/bin/podman} export QUADLET=${QUADLET:-$(pwd)/bin/quadlet} -# Because 'make' doesn't do this by default -chcon -t container_runtime_exec_t $PODMAN - # Directory in which TESTS=test/system @@ -128,6 +125,9 @@ fi # Used in 120-load test to identify rootless destination for podman image scp export PODMAN_ROOTLESS_USER=$(id -un) +# Make sure to always check for leaks when running locally +export PODMAN_BATS_LEAK_CHECK=1 + # Root if [[ "$TEST_ROOT" ]]; then echo "# bats ${bats_filter[*]} $TESTS" diff --git a/hack/golangci-lint.sh b/hack/golangci-lint.sh index c7b9c6e764..8803b68bd3 100755 --- a/hack/golangci-lint.sh +++ b/hack/golangci-lint.sh @@ -15,11 +15,11 @@ set -e BUILD_TAGS_DEFAULT="apparmor,seccomp,selinux" BUILD_TAGS_ABI="$BUILD_TAGS_DEFAULT,systemd" BUILD_TAGS_TUNNEL="$BUILD_TAGS_DEFAULT,remote" -BUILD_TAGS_REMOTE="remote" 
+BUILD_TAGS_REMOTE="remote,containers_image_openpgp"
 
 SKIP_DIRS_ABI=""
-SKIP_DIRS_TUNNEL="pkg/api,pkg/domain/infra/abi"
-SKIP_DIRS_REMOTE="libpod/events,pkg/api,pkg/domain/infra/abi,pkg/machine/qemu,pkg/trust,test"
+SKIP_DIRS_TUNNEL="pkg/api,pkg/domain/infra/abi,internal/domain/infra/abi"
+SKIP_DIRS_REMOTE="libpod/events,pkg/api,pkg/domain/infra/abi,internal/domain/infra/abi,pkg/machine/qemu,pkg/trust,test"
 
 declare -a to_lint
 to_lint=(ABI TUNNEL REMOTE)
@@ -40,6 +40,6 @@ for to_lint in "${to_lint[@]}"; do
     # Make it really easy for a developer to copy-paste the command-line
     # to focus or debug a single, specific linting category.
     set -x
-    ./bin/golangci-lint run --build-tags="${!tags_var}" --skip-dirs="${!skip_var}" "$@"
+    ./bin/golangci-lint run --timeout=10m --build-tags="${!tags_var}" --exclude-dirs="${!skip_var}" "$@"
   )
 done
diff --git a/hack/install_swagger.sh b/hack/install_swagger.sh
deleted file mode 100755
index 7954ebf264..0000000000
--- a/hack/install_swagger.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is intended to be a convenience, to be called from the
-# Makefile `.install.swagger` target. Any other usage is not recommended.
-
-BIN="$BINDIR/swagger"
-
-die() { echo "${1:-No error message given} (from $(basename $0))"; exit 1; }
-
-function install() {
-    echo "Installing swagger v$VERSION into $BIN"
-    curl -sS --retry 5 --location -o $BIN \
-        https://github.com/go-swagger/go-swagger/releases/download/v$VERSION/swagger_${GOOS}_${GOARCH}
-    chmod +x $BIN
-    $BIN version
-}
-
-for req_var in VERSION BINDIR GOOS GOARCH; do
-    [[ -n "${!req_var}" ]] || die "\$$req_var is empty or undefined"
-done
-
-if [ ! -x "$BIN" ]; then
-    install
-else
-    $BIN version | grep "$VERSION"
-    if [[ "$?" -eq 0 ]]; then
-        echo "Using existing $BIN"
-    else
-        install
-    fi
-fi
diff --git a/hack/xref-helpmsgs-manpages b/hack/xref-helpmsgs-manpages
index 6cd1d1cd9b..e62f068c8c 100755
--- a/hack/xref-helpmsgs-manpages
+++ b/hack/xref-helpmsgs-manpages
@@ -52,7 +52,7 @@ my $Format_Exceptions = <<'END_EXCEPTIONS';
 # Deep internal structs; pretty sure these are permanent exceptions
 events      .Details
 history     .ImageHistoryLayer
-images      .ImageSummary
+images      .Arch .ImageSummary .Os .IsManifestList
 network-ls  .Network
 
 # FIXME: this one, maybe? But someone needs to write the text
@@ -203,6 +203,8 @@ sub xref_by_help {
   OPTION:
     for my $k (sort keys %$help) {
+        next if $k =~ /^_/;   # metadata ("_desc"). Ignore.
+
         if (! ref($man)) {
             # Super-unlikely but I've seen it
             warn "$ME: 'podman @subcommand' is not documented in man pages!\n";
@@ -354,16 +356,38 @@ sub xref_by_man {
 ##############
 #  xref_rst  #  Cross-check *.rst files against help
 ##############
+#
+# This makes a pass over top-level commands only. There is no rst
+# documentation for any podman subcommands.
+#
 sub xref_rst {
-    my ($help, $rst, @subcommand) = @_;
+    my ($help, $rst) = @_;
+
-    # Cross-check against rst (but only subcommands, not options).
     # We key on $help because that is Absolute Truth: anything in podman --help
- for my $k (sort grep { $_ !~ /^-/ } keys %$help) { - # Check for subcommands, if any (eg podman system -> connection -> add) - if (ref $help->{$k}) { - xref_rst($help->{$k}, $rst->{$k}, @subcommand, $k); + # must be referenced in an rst (the converse is not necessarily true) + for my $k (sort grep { $_ !~ /^[_-]/ } keys %$help) { + if (exists $rst->{$k}) { + # Descriptions must match + if ($rst->{$k}{_desc} ne $help->{$k}{_desc}) { + warn "$ME: podman $k: inconsistent description in $rst->{$k}{_source}:\n"; + warn " help: '$help->{$k}{_desc}'\n"; + warn " rst: '$rst->{$k}{_desc}'\n"; + ++$Errs; + } + } + else { + warn "$ME: Not found in rst: $k\n"; + ++$Errs; + } + } + + # Now the other way around: look for anything in Commands.rst that is + # not in podman --help + for my $k (sort grep { $rst->{$_}{_source} =~ /Commands.rst/ } keys %$rst) { + if ($k ne 'Podman' && ! exists $help->{$k}) { + warn "$ME: 'podman $k' found in $rst->{$k}{_source} but not 'podman help'\n"; + ++$Errs; } } } @@ -381,7 +405,17 @@ sub podman_help { or die "$ME: Cannot fork: $!\n"; my $section = ''; while (my $line = <$fh>) { + chomp $line; + + # First line of --help is a short command description. We compare it + # (in a later step) against the blurb in Commands.rst. + # FIXME: we should crossref against man pages, but as of 2024-03-18 + # it would be way too much work to get those aligned. + $help{_desc} //= $line; + # Cobra is blessedly consistent in its output: + # [command blurb] + # Description: ... # Usage: ... # Available Commands: # .... @@ -840,14 +874,8 @@ sub podman_rst { if ($command eq 'Commands') { ; } - elsif ($command eq 'managecontainers') { - $subcommand_href = $rst{container} //= { }; - } - elsif ($command eq 'connection') { - $subcommand_href = $rst{system}{connection} //= { }; - } else { - $subcommand_href = $rst{$command} //= { }; + $subcommand_href = $rst{$command} //= { _source => $rst }; } my $previous_subcommand = ''; @@ -855,15 +883,20 @@ sub podman_rst { if ($line =~ /^:doc:`(\S+)\s+<(.*?)>`\s+(.*)/) { my ($subcommand, $target, $desc) = ($1, $2, $3); - # Check that entries are in alphabetical order + # Check that entries are in alphabetical order, and not dups if ($subcommand lt $previous_subcommand) { warn "$ME: $rst:$.: '$previous_subcommand' and '$subcommand' are out of order\n"; ++$Errs; } + if ($subcommand eq $previous_subcommand) { + warn "$ME: $rst:$.: duplicate '$subcommand'\n"; + ++$Errs; + } $previous_subcommand = $subcommand; # Mark this subcommand as documented. $subcommand_href->{$subcommand}{_desc} = $desc; + $subcommand_href->{$subcommand}{_source} = $rst; # Check for invalid links. These will be one of two forms: # -> markdown/foo.1.md @@ -873,10 +906,20 @@ sub podman_rst { warn "$ME: $rst:$.: '$subcommand' links to nonexistent $target\n"; ++$Errs; } + + my $expect = "markdown/podman-$subcommand.1"; + if ($subcommand eq 'Podman') { + $expect = "markdown/podman.1"; + } + if ($target ne $expect) { + warn "$ME: $rst:$.: '$subcommand' links to $target (expected '$expect')\n"; + ++$Errs; + } } else { if (! 
-e "$Docs_Path/$target.rst") { warn "$ME: $rst:$.: '$subcommand' links to nonexistent $target.rst\n"; + ++$Errs; } } } diff --git a/hack/xref-helpmsgs-manpages.t b/hack/xref-helpmsgs-manpages.t index a625f39089..af21e2aee6 100644 --- a/hack/xref-helpmsgs-manpages.t +++ b/hack/xref-helpmsgs-manpages.t @@ -130,7 +130,7 @@ $mclone->{events}{"--format"}{".Attributes"} = 0; $mclone->{events}{"--format"}{".Image"} = '...'; $mclone->{events}{"--format"}{".Status"} = 1; $hclone->{events}{"--format"}{".Status"} = '...'; -$mclone->{events}{"--format"}{".ToHumanReadable"} = 3; +$mclone->{pod}{ps}{"--format"}{".Label"} = 3; $mclone->{ps}{"--format"}{".Label"} = 0; # --format is documented, with a table, but one entry missing delete $mclone->{events}{"--format"}{".Type"}; @@ -154,11 +154,11 @@ test_xref("xref_by_help() injection", $hclone, $mclone, "'podman events --format {{.Attributes' is a nested structure. Please add '...' to man page.", "'podman events --format {{.Image' is a simple value, not a nested structure. Please remove '...' from man page.", "'podman events --format {{.Status' is a nested structure, but the man page documents it as a function?!?", - "'podman events --format {{.ToHumanReadable' is a function that calls for 1 args; the man page lists 3. Please fix the man page.", "'podman events --format ' lists '.Type', which is not in podman-events.1.md", "'podman --help' lists 'new-command-in-help', which is not in podman.1.md", "'podman partlydocumented' is not documented in man pages!", "'podman pod inspect --help' lists '-l', which is not in podman-pod-inspect.1.md", + "'podman pod ps --format {{.Label' is a function that calls for 1 args; the man page lists 3. Please fix the man page.", "'podman ps --format {{.Label' is a function that calls for 1 args. Please investigate what those are, then add them to the man page. 
E.g., '.Label *bool*' or '.Label *path* *bool*'", "'podman secret --help' lists 'subcommand-in-help', which is not in podman-secret.1.md", ], diff --git a/internal/domain/entities/engine_testing.go b/internal/domain/entities/engine_testing.go new file mode 100644 index 0000000000..9ad9ee18bb --- /dev/null +++ b/internal/domain/entities/engine_testing.go @@ -0,0 +1,27 @@ +package entities + +import ( + "context" +) + +type TestingEngine interface { //nolint:interfacebloat + CreateStorageLayer(ctx context.Context, opts CreateStorageLayerOptions) (*CreateStorageLayerReport, error) + CreateLayer(ctx context.Context, opts CreateLayerOptions) (*CreateLayerReport, error) + CreateLayerData(ctx context.Context, opts CreateLayerDataOptions) (*CreateLayerDataReport, error) + CreateImage(ctx context.Context, opts CreateImageOptions) (*CreateImageReport, error) + CreateImageData(ctx context.Context, opts CreateImageDataOptions) (*CreateImageDataReport, error) + CreateContainer(ctx context.Context, opts CreateContainerOptions) (*CreateContainerReport, error) + CreateContainerData(ctx context.Context, opts CreateContainerDataOptions) (*CreateContainerDataReport, error) + ModifyLayer(ctx context.Context, opts ModifyLayerOptions) (*ModifyLayerReport, error) + PopulateLayer(ctx context.Context, opts PopulateLayerOptions) (*PopulateLayerReport, error) + RemoveStorageLayer(ctx context.Context, opts RemoveStorageLayerOptions) (*RemoveStorageLayerReport, error) + RemoveLayer(ctx context.Context, opts RemoveLayerOptions) (*RemoveLayerReport, error) + RemoveImage(ctx context.Context, opts RemoveImageOptions) (*RemoveImageReport, error) + RemoveContainer(ctx context.Context, opts RemoveContainerOptions) (*RemoveContainerReport, error) + RemoveLayerData(ctx context.Context, opts RemoveLayerDataOptions) (*RemoveLayerDataReport, error) + RemoveImageData(ctx context.Context, opts RemoveImageDataOptions) (*RemoveImageDataReport, error) + RemoveContainerData(ctx context.Context, opts RemoveContainerDataOptions) (*RemoveContainerDataReport, error) + ModifyLayerData(ctx context.Context, opts ModifyLayerDataOptions) (*ModifyLayerDataReport, error) + ModifyImageData(ctx context.Context, opts ModifyImageDataOptions) (*ModifyImageDataReport, error) + ModifyContainerData(ctx context.Context, opts ModifyContainerDataOptions) (*ModifyContainerDataReport, error) +} diff --git a/internal/domain/entities/testing.go b/internal/domain/entities/testing.go new file mode 100644 index 0000000000..754c4927e7 --- /dev/null +++ b/internal/domain/entities/testing.go @@ -0,0 +1,153 @@ +package entities + +type CreateStorageLayerOptions struct { + Parent string + ID string + ContentsArchive []byte +} + +type CreateStorageLayerReport struct { + ID string +} + +type CreateLayerOptions struct { + Parent string + ID string +} + +type CreateLayerReport struct { + ID string +} + +type CreateLayerDataOptions struct { + ID string + Data map[string][]byte +} + +type CreateLayerDataReport struct{} + +type CreateImageOptions struct { + Layer string + Names []string + ID string +} + +type CreateImageReport struct { + ID string +} + +type CreateImageDataOptions struct { + ID string + Data map[string][]byte +} + +type CreateImageDataReport struct{} + +type CreateContainerOptions struct { + Layer string + Image string + Names []string + ID string +} + +type CreateContainerReport struct { + ID string +} + +type CreateContainerDataOptions struct { + ID string + Data map[string][]byte +} + +type CreateContainerDataReport struct{} + +type ModifyLayerOptions 
struct { + ID string + ContentsArchive []byte +} + +type ModifyLayerReport struct{} + +type PopulateLayerOptions struct { + ID string + ContentsArchive []byte +} + +type PopulateLayerReport struct{} + +type RemoveStorageLayerOptions struct { + ID string +} + +type RemoveStorageLayerReport struct { + ID string +} + +type RemoveLayerOptions struct { + ID string +} + +type RemoveLayerReport struct { + ID string +} + +type RemoveImageOptions struct { + ID string +} + +type RemoveImageReport struct { + ID string +} + +type RemoveContainerOptions struct { + ID string +} + +type RemoveContainerReport struct { + ID string +} + +type RemoveLayerDataOptions struct { + ID string + Key string +} + +type RemoveLayerDataReport struct{} + +type RemoveImageDataOptions struct { + ID string + Key string +} + +type RemoveImageDataReport struct{} + +type RemoveContainerDataOptions struct { + ID string + Key string +} + +type RemoveContainerDataReport struct{} + +type ModifyLayerDataOptions struct { + ID string + Key string + Data []byte +} + +type ModifyLayerDataReport struct{} + +type ModifyImageDataOptions struct { + ID string + Key string + Data []byte +} + +type ModifyImageDataReport struct{} + +type ModifyContainerDataOptions struct { + ID string + Key string + Data []byte +} + +type ModifyContainerDataReport struct{} diff --git a/internal/domain/infra/abi/testing.go b/internal/domain/infra/abi/testing.go new file mode 100644 index 0000000000..4cf49d0801 --- /dev/null +++ b/internal/domain/infra/abi/testing.go @@ -0,0 +1,220 @@ +package abi + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/manifest" + "github.com/containers/podman/v5/internal/domain/entities" + "github.com/containers/podman/v5/libpod" + "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/stringid" +) + +type TestingEngine struct { + Libpod *libpod.Runtime + Store storage.Store +} + +func (te *TestingEngine) CreateStorageLayer(ctx context.Context, opts entities.CreateStorageLayerOptions) (*entities.CreateStorageLayerReport, error) { + driver, err := te.Store.GraphDriver() + if err != nil { + return nil, err + } + id := opts.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + if err := driver.CreateReadWrite(id, opts.Parent, &graphdriver.CreateOpts{}); err != nil { + return nil, err + } + return &entities.CreateStorageLayerReport{ID: id}, nil +} + +func (te *TestingEngine) CreateLayer(ctx context.Context, opts entities.CreateLayerOptions) (*entities.CreateLayerReport, error) { + layer, err := te.Store.CreateLayer(opts.ID, opts.Parent, nil, "", true, nil) + if err != nil { + return nil, err + } + return &entities.CreateLayerReport{ID: layer.ID}, nil +} + +func (te *TestingEngine) CreateLayerData(ctx context.Context, opts entities.CreateLayerDataOptions) (*entities.CreateLayerDataReport, error) { + for key, data := range opts.Data { + if err := te.Store.SetLayerBigData(opts.ID, key, bytes.NewReader(data)); err != nil { + return nil, err + } + } + return &entities.CreateLayerDataReport{}, nil +} + +func (te *TestingEngine) ModifyLayer(ctx context.Context, opts entities.ModifyLayerOptions) (*entities.ModifyLayerReport, error) { + mnt, err := te.Store.Mount(opts.ID, "") + if err != nil { + return nil, err + } + modifyError := chrootarchive.UntarWithRoot(bytes.NewReader(opts.ContentsArchive), mnt, nil, mnt) + if _, err := te.Store.Unmount(opts.ID, false); err != 
nil { + return nil, err + } + if modifyError != nil { + return nil, modifyError + } + return &entities.ModifyLayerReport{}, nil +} + +func (te *TestingEngine) PopulateLayer(ctx context.Context, opts entities.PopulateLayerOptions) (*entities.PopulateLayerReport, error) { + if _, err := te.Store.ApplyDiff(opts.ID, bytes.NewReader(opts.ContentsArchive)); err != nil { + return nil, err + } + return &entities.PopulateLayerReport{}, nil +} + +func (te *TestingEngine) CreateImage(ctx context.Context, opts entities.CreateImageOptions) (*entities.CreateImageReport, error) { + image, err := te.Store.CreateImage(opts.ID, opts.Names, opts.Layer, "", nil) + if err != nil { + return nil, err + } + return &entities.CreateImageReport{ID: image.ID}, nil +} + +func (te *TestingEngine) CreateImageData(ctx context.Context, opts entities.CreateImageDataOptions) (*entities.CreateImageDataReport, error) { + for key, data := range opts.Data { + if err := te.Store.SetImageBigData(opts.ID, key, data, manifest.Digest); err != nil { + return nil, err + } + } + return &entities.CreateImageDataReport{}, nil +} + +func (te *TestingEngine) CreateContainer(ctx context.Context, opts entities.CreateContainerOptions) (*entities.CreateContainerReport, error) { + image, err := te.Store.CreateContainer(opts.ID, opts.Names, opts.Image, opts.Layer, "", nil) + if err != nil { + return nil, err + } + return &entities.CreateContainerReport{ID: image.ID}, nil +} + +func (te *TestingEngine) CreateContainerData(ctx context.Context, opts entities.CreateContainerDataOptions) (*entities.CreateContainerDataReport, error) { + for key, data := range opts.Data { + if err := te.Store.SetContainerBigData(opts.ID, key, data); err != nil { + return nil, err + } + } + return &entities.CreateContainerDataReport{}, nil +} + +func (te *TestingEngine) RemoveStorageLayer(ctx context.Context, opts entities.RemoveStorageLayerOptions) (*entities.RemoveStorageLayerReport, error) { + driver, err := te.Store.GraphDriver() + if err != nil { + return nil, err + } + if err := driver.Remove(opts.ID); err != nil { + return nil, err + } + return &entities.RemoveStorageLayerReport{ID: opts.ID}, nil +} + +func (te *TestingEngine) RemoveLayer(ctx context.Context, opts entities.RemoveLayerOptions) (*entities.RemoveLayerReport, error) { + if err := te.Store.Delete(opts.ID); err != nil { + return nil, err + } + return &entities.RemoveLayerReport{ID: opts.ID}, nil +} + +func (te *TestingEngine) RemoveImage(ctx context.Context, opts entities.RemoveImageOptions) (*entities.RemoveImageReport, error) { + if err := te.Store.Delete(opts.ID); err != nil { + return nil, err + } + return &entities.RemoveImageReport{ID: opts.ID}, nil +} + +func (te *TestingEngine) RemoveContainer(ctx context.Context, opts entities.RemoveContainerOptions) (*entities.RemoveContainerReport, error) { + if err := te.Store.Delete(opts.ID); err != nil { + return nil, err + } + return &entities.RemoveContainerReport{ID: opts.ID}, nil +} + +func (te *TestingEngine) datapath(itemType, id, key string) (string, error) { + switch itemType { + default: + return "", fmt.Errorf("unknown item type %q", itemType) + case "layer", "image", "container": + } + driverName := te.Store.GraphDriverName() + graphRoot := te.Store.GraphRoot() + datapath := filepath.Join(graphRoot, driverName+"-"+itemType+"s", id, key) // more or less accurate for keys whose names are [.a-z0-9]+ + return datapath, nil +} + +func (te *TestingEngine) RemoveLayerData(ctx context.Context, opts entities.RemoveLayerDataOptions) 
(*entities.RemoveLayerDataReport, error) { + datapath, err := te.datapath("layer", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.Remove(datapath); err != nil { + return nil, err + } + return &entities.RemoveLayerDataReport{}, nil +} + +func (te *TestingEngine) RemoveImageData(ctx context.Context, opts entities.RemoveImageDataOptions) (*entities.RemoveImageDataReport, error) { + datapath, err := te.datapath("image", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.Remove(datapath); err != nil { + return nil, err + } + return &entities.RemoveImageDataReport{}, nil +} + +func (te *TestingEngine) RemoveContainerData(ctx context.Context, opts entities.RemoveContainerDataOptions) (*entities.RemoveContainerDataReport, error) { + datapath, err := te.datapath("container", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.Remove(datapath); err != nil { + return nil, err + } + return &entities.RemoveContainerDataReport{}, nil +} + +func (te *TestingEngine) ModifyLayerData(ctx context.Context, opts entities.ModifyLayerDataOptions) (*entities.ModifyLayerDataReport, error) { + datapath, err := te.datapath("layer", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.WriteFile(datapath, opts.Data, 0o0600); err != nil { + return nil, err + } + return &entities.ModifyLayerDataReport{}, nil +} + +func (te *TestingEngine) ModifyImageData(ctx context.Context, opts entities.ModifyImageDataOptions) (*entities.ModifyImageDataReport, error) { + datapath, err := te.datapath("image", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.WriteFile(datapath, opts.Data, 0o0600); err != nil { + return nil, err + } + return &entities.ModifyImageDataReport{}, nil +} + +func (te *TestingEngine) ModifyContainerData(ctx context.Context, opts entities.ModifyContainerDataOptions) (*entities.ModifyContainerDataReport, error) { + datapath, err := te.datapath("container", opts.ID, opts.Key) + if err != nil { + return nil, err + } + if err = os.WriteFile(datapath, opts.Data, 0o0600); err != nil { + return nil, err + } + return &entities.ModifyContainerDataReport{}, nil +} diff --git a/internal/domain/infra/abi/testing_test.go b/internal/domain/infra/abi/testing_test.go new file mode 100644 index 0000000000..75c6f4f542 --- /dev/null +++ b/internal/domain/infra/abi/testing_test.go @@ -0,0 +1,5 @@ +package abi + +import "github.com/containers/podman/v5/internal/domain/entities" + +var _ entities.TestingEngine = &TestingEngine{} diff --git a/internal/domain/infra/runtime_abi.go b/internal/domain/infra/runtime_abi.go new file mode 100644 index 0000000000..9dab3b190c --- /dev/null +++ b/internal/domain/infra/runtime_abi.go @@ -0,0 +1,26 @@ +//go:build !remote + +package infra + +import ( + "context" + "fmt" + + ientities "github.com/containers/podman/v5/internal/domain/entities" + "github.com/containers/podman/v5/internal/domain/infra/tunnel" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities" +) + +// NewTestingEngine factory provides a libpod runtime for testing-specific operations +func NewTestingEngine(facts *entities.PodmanConfig) (ientities.TestingEngine, error) { + switch facts.EngineMode { + case entities.ABIMode: + r, err := NewLibpodTestingRuntime(facts.FlagSet, facts) + return r, err + case entities.TunnelMode: + ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.Identity, facts.MachineMode) + return 
&tunnel.TestingEngine{ClientCtx: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) +} diff --git a/internal/domain/infra/runtime_proxy.go b/internal/domain/infra/runtime_proxy.go new file mode 100644 index 0000000000..2e2600cd65 --- /dev/null +++ b/internal/domain/infra/runtime_proxy.go @@ -0,0 +1,26 @@ +//go:build !remote + +package infra + +import ( + "context" + + ientities "github.com/containers/podman/v5/internal/domain/entities" + "github.com/containers/podman/v5/internal/domain/infra/abi" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/infra" + "github.com/containers/storage" + flag "github.com/spf13/pflag" +) + +func NewLibpodTestingRuntime(flags *flag.FlagSet, opts *entities.PodmanConfig) (ientities.TestingEngine, error) { + r, err := infra.GetRuntime(context.Background(), flags, opts) + if err != nil { + return nil, err + } + store, err := storage.GetStore(r.StorageConfig()) + if err != nil { + return nil, err + } + return &abi.TestingEngine{Libpod: r, Store: store}, nil +} diff --git a/internal/domain/infra/runtime_tunnel.go b/internal/domain/infra/runtime_tunnel.go new file mode 100644 index 0000000000..5bade4eddc --- /dev/null +++ b/internal/domain/infra/runtime_tunnel.go @@ -0,0 +1,25 @@ +//go:build remote + +package infra + +import ( + "context" + "fmt" + + ientities "github.com/containers/podman/v5/internal/domain/entities" + "github.com/containers/podman/v5/internal/domain/infra/tunnel" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities" +) + +// NewTestingEngine factory provides a libpod runtime for testing-specific operations +func NewTestingEngine(facts *entities.PodmanConfig) (ientities.TestingEngine, error) { + switch facts.EngineMode { + case entities.ABIMode: + return nil, fmt.Errorf("direct image runtime not supported") + case entities.TunnelMode: + ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.Identity, facts.MachineMode) + return &tunnel.TestingEngine{ClientCtx: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) +} diff --git a/internal/domain/infra/tunnel/testing.go b/internal/domain/infra/tunnel/testing.go new file mode 100644 index 0000000000..8efc6c3727 --- /dev/null +++ b/internal/domain/infra/tunnel/testing.go @@ -0,0 +1,88 @@ +package tunnel + +import ( + "context" + "syscall" + + "github.com/containers/podman/v5/internal/domain/entities" +) + +type TestingEngine struct { + ClientCtx context.Context +} + +func (te *TestingEngine) CreateStorageLayer(ctx context.Context, opts entities.CreateStorageLayerOptions) (*entities.CreateStorageLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateLayer(ctx context.Context, opts entities.CreateLayerOptions) (*entities.CreateLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateLayerData(ctx context.Context, opts entities.CreateLayerDataOptions) (*entities.CreateLayerDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) ModifyLayer(ctx context.Context, opts entities.ModifyLayerOptions) (*entities.ModifyLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) PopulateLayer(ctx context.Context, opts entities.PopulateLayerOptions) (*entities.PopulateLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveStorageLayer(ctx context.Context, 
opts entities.RemoveStorageLayerOptions) (*entities.RemoveStorageLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateImage(ctx context.Context, opts entities.CreateImageOptions) (*entities.CreateImageReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateImageData(ctx context.Context, opts entities.CreateImageDataOptions) (*entities.CreateImageDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveLayer(ctx context.Context, opts entities.RemoveLayerOptions) (*entities.RemoveLayerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveImage(ctx context.Context, opts entities.RemoveImageOptions) (*entities.RemoveImageReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveContainer(ctx context.Context, opts entities.RemoveContainerOptions) (*entities.RemoveContainerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateContainer(ctx context.Context, opts entities.CreateContainerOptions) (*entities.CreateContainerReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) CreateContainerData(ctx context.Context, opts entities.CreateContainerDataOptions) (*entities.CreateContainerDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveLayerData(ctx context.Context, opts entities.RemoveLayerDataOptions) (*entities.RemoveLayerDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveImageData(ctx context.Context, opts entities.RemoveImageDataOptions) (*entities.RemoveImageDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) RemoveContainerData(ctx context.Context, opts entities.RemoveContainerDataOptions) (*entities.RemoveContainerDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) ModifyLayerData(ctx context.Context, opts entities.ModifyLayerDataOptions) (*entities.ModifyLayerDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) ModifyImageData(ctx context.Context, opts entities.ModifyImageDataOptions) (*entities.ModifyImageDataReport, error) { + return nil, syscall.ENOSYS +} + +func (te *TestingEngine) ModifyContainerData(ctx context.Context, opts entities.ModifyContainerDataOptions) (*entities.ModifyContainerDataReport, error) { + return nil, syscall.ENOSYS +} diff --git a/internal/domain/infra/tunnel/testing_test.go b/internal/domain/infra/tunnel/testing_test.go new file mode 100644 index 0000000000..d8cfa120f4 --- /dev/null +++ b/internal/domain/infra/tunnel/testing_test.go @@ -0,0 +1,5 @@ +package tunnel + +import "github.com/containers/podman/v5/internal/domain/entities" + +var _ entities.TestingEngine = &TestingEngine{} diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 44428df087..45fd80b79c 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -16,6 +16,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/podman/v5/libpod/define" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -86,7 +87,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { // To continue testing in CI, allow creation iff an undocumented env // var is set. 
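// The new testing engine files above lean on two small Go idioms: a compile-time
// assertion (in testing_test.go) that the struct still satisfies the interface, and
// tunnel stubs that return syscall.ENOSYS until a remote implementation exists. A
// minimal, self-contained sketch of the same pattern follows; the Pinger interface
// and remotePinger type are illustrative stand-ins, not part of this patch.
package example

import (
	"context"
	"syscall"
)

type Pinger interface {
	Ping(ctx context.Context) error
}

type remotePinger struct{}

// Compilation fails on this line if *remotePinger ever stops implementing Pinger.
var _ Pinger = &remotePinger{}

// Ping is a stub: callers get a clear "not implemented" error instead of a panic.
func (*remotePinger) Ping(context.Context) error {
	return syscall.ENOSYS
}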
if os.Getenv("CI_DESIRED_DATABASE") != "boltdb" { - if _, err := os.Stat(path); err != nil && errors.Is(err, fs.ErrNotExist) { + if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { return nil, fmt.Errorf("the BoltDB backend has been deprecated, no new BoltDB databases can be created: %w", define.ErrInvalidArg) } } else { @@ -1469,7 +1470,7 @@ func (s *BoltState) GetContainerExitCodeTimeStamp(id string) (*time.Time, error) }) } -// PruneExitCodes removes exit codes older than 5 minutes unless the associated +// PruneContainerExitCodes removes exit codes older than 5 minutes unless the associated // container still exists. func (s *BoltState) PruneContainerExitCodes() error { if !s.valid { diff --git a/libpod/container.go b/libpod/container.go index 45ec75a2f6..3619b82c3f 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/containers/common/libnetwork/pasta" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" @@ -127,6 +128,7 @@ type Container struct { restoreFromCheckpoint bool slirp4netnsSubnet *net.IPNet + pastaResult *pasta.SetupResult } // ContainerState contains the current state of the container @@ -207,6 +209,9 @@ type ContainerState struct { // healthcheck. The container will be restarted if this exceed a set // number in the startup HC config. StartupHCFailureCount int `json:"startupHCFailureCount,omitempty"` + // HCUnitName records the name of the healthcheck unit. + // Automatically generated when the healthcheck is started. + HCUnitName string `json:"hcUnitName,omitempty"` // ExtensionStageHooks holds hooks which will be executed by libpod // and not delegated to the OCI runtime. @@ -273,6 +278,8 @@ type ContainerImageVolume struct { Dest string `json:"dest"` // ReadWrite sets the volume writable. ReadWrite bool `json:"rw"` + // SubPath determines which part of the image will be mounted into the container. + SubPath string `json:"subPath,omitempty"` } // ContainerSecret is a secret that is mounted in a container diff --git a/libpod/container_api.go b/libpod/container_api.go index 8b1d677d34..525183c751 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -115,16 +115,31 @@ func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error) } // Start the container - return c.start(ctx) + if err := c.start(); err != nil { + return err + } + return c.waitForHealthy(ctx) } // Update updates the given container. -// only the cgroup config can be updated and therefore only a linux resource spec is passed. -func (c *Container) Update(res *spec.LinuxResources) error { - if err := c.syncContainer(); err != nil { - return err +// Either resource limits or restart policy can be updated. +// Either resourcs or restartPolicy must not be nil. +// If restartRetries is not nil, restartPolicy must be set and must be "on-failure". +func (c *Container) Update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + if c.ensureState(define.ContainerStateRemoving) { + return fmt.Errorf("container %s is being removed, cannot update: %w", c.ID(), define.ErrCtrStateInvalid) } - return c.update(res) + + return c.update(resources, restartPolicy, restartRetries) } // StartAndAttach starts a container and attaches to it. 
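// The BoltDB hunk above, and several later hunks in libpod, swap `_, err := os.Stat(path)`
// for `err := fileutils.Exists(path)` from containers/storage, which reports only whether
// the path is reachable. A rough standard-library approximation of that calling pattern is
// sketched below; the real helper may use access(2) rather than stat(2), and the `exists`
// and `isMissing` names are illustrative only.
package example

import (
	"errors"
	"io/fs"
	"os"
)

// exists returns nil when path is present, or the underlying error otherwise.
func exists(path string) error {
	_, err := os.Stat(path)
	return err
}

// isMissing mirrors the errors.Is(err, fs.ErrNotExist) checks used throughout the patch.
func isMissing(path string) bool {
	return errors.Is(exists(path), fs.ErrNotExist)
}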
@@ -172,7 +187,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt // Attach to the container before starting it go func() { // Start resizing - if c.LogDriver() != define.PassthroughLogging { + if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging { registerResizeFunc(resize, c.bundlePath()) } @@ -182,6 +197,9 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt opts.Start = true opts.Started = startedChan + // attach and start the container on a different thread. waitForHealthy must + // be done later, as it requires to run on the same thread that holds the lock + // for the container. if err := c.ociRuntime.Attach(c, opts); err != nil { attachChan <- err } @@ -195,7 +213,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt c.newContainerEvent(events.Attach) } - return attachChan, nil + return attachChan, c.waitForHealthy(ctx) } // RestartWithTimeout restarts a running container and takes a given timeout in uint @@ -304,6 +322,9 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <- if c.LogDriver() == define.PassthroughLogging { return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs) } + if c.LogDriver() == define.PassthroughTTYLogging { + return fmt.Errorf("this container is using the 'passthrough-tty' log driver, cannot attach: %w", define.ErrNoLogs) + } if !c.batched { c.lock.Lock() if err := c.syncContainer(); err != nil { @@ -336,7 +357,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <- } // Start resizing - if c.LogDriver() != define.PassthroughLogging { + if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging { registerResizeFunc(resize, c.bundlePath()) } @@ -560,11 +581,16 @@ func (c *Container) Wait(ctx context.Context) (int32, error) { // WaitForExit blocks until the container exits and returns its exit code. The // argument is the interval at which checks the container's status. func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) { + id := c.ID() if !c.valid { + // if the container is not valid at this point as it was deleted, + // check if the exit code was recorded in the db. + exitCode, err := c.runtime.state.GetContainerExitCode(id) + if err == nil { + return exitCode, nil + } return -1, define.ErrCtrRemoved } - - id := c.ID() var conmonTimer time.Timer conmonTimerSet := false @@ -746,7 +772,7 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou wg.Add(1) go func() { defer wg.Done() - + stoppedCount := 0 for { if len(wantedStates) > 0 { state, err := c.State() @@ -760,6 +786,21 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou } } if len(wantedHealthStates) > 0 { + // even if we are interested only in the health check + // check that the container is still running to avoid + // waiting until the timeout expires. 
+ if stoppedCount > 0 { + stoppedCount++ + } else { + state, err := c.State() + if err != nil { + trySend(-1, err) + return + } + if state != define.ContainerStateCreated && state != define.ContainerStateRunning && state != define.ContainerStatePaused { + stoppedCount++ + } + } status, err := c.HealthCheckStatus() if err != nil { trySend(-1, err) @@ -769,6 +810,12 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou trySend(-1, nil) return } + // wait for another waitTimeout interval to give the health check process some time + // to record the healthy status. + if stoppedCount > 1 { + trySend(-1, define.ErrCtrStopped) + return + } } select { case <-ctx.Done(): diff --git a/libpod/container_config.go b/libpod/container_config.go index 262a4befa0..8c4e0176c5 100644 --- a/libpod/container_config.go +++ b/libpod/container_config.go @@ -349,7 +349,7 @@ type ContainerMiscConfig struct { Labels map[string]string `json:"labels,omitempty"` // StopSignal is the signal that will be used to stop the container StopSignal uint `json:"stopSignal,omitempty"` - // StopTimeout is the signal that will be used to stop the container + // StopTimeout is maximum time a container is allowed to run after getting the stop signal StopTimeout uint `json:"stopTimeout,omitempty"` // Timeout is maximum time a container will run before getting the kill signal Timeout uint `json:"timeout,omitempty"` diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index de1d059ecf..ef4bac14e4 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -9,6 +9,7 @@ import ( "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/libpod/driver" + "github.com/containers/podman/v5/pkg/signal" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/types" "github.com/docker/go-units" @@ -388,7 +389,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp // Leave empty if not explicitly overwritten by user if len(c.config.Entrypoint) != 0 { - ctrConfig.Entrypoint = strings.Join(c.config.Entrypoint, " ") + ctrConfig.Entrypoint = c.config.Entrypoint } if len(c.config.Labels) != 0 { @@ -404,8 +405,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.Annotations[k] = v } } - - ctrConfig.StopSignal = c.config.StopSignal + ctrConfig.StopSignal = signal.ToDockerFormat(c.config.StopSignal) // TODO: should JSON deep copy this to ensure internal pointers don't // leak. 
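// The stoppedCount bookkeeping above in WaitForConditionWithInterval gives the health-check
// status exactly one extra polling interval to be recorded after the container stops, instead
// of blocking for the full timeout. A condensed, self-contained sketch of that "one more tick
// after stop" pattern, with hypothetical healthy/stopped callbacks standing in for the real
// container state and health queries:
package example

import (
	"errors"
	"time"
)

var errStopped = errors.New("stopped before reaching the wanted state")

func waitHealthy(healthy, stopped func() bool, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	stoppedTicks := 0
	for time.Now().Before(deadline) {
		if stoppedTicks > 0 || stopped() {
			stoppedTicks++
		}
		if healthy() {
			return nil
		}
		// Already stopped on a previous iteration and still not healthy: give up now
		// rather than burning the remaining timeout.
		if stoppedTicks > 1 {
			return errStopped
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the wanted state")
}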
ctrConfig.Healthcheck = c.config.HealthCheckConfig @@ -467,6 +467,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named restartPolicy := new(define.InspectRestartPolicy) restartPolicy.Name = c.config.RestartPolicy + if restartPolicy.Name == "" { + restartPolicy.Name = define.RestartPolicyNo + } restartPolicy.MaximumRetryCount = c.config.RestartRetries hostConfig.RestartPolicy = restartPolicy if c.config.NoCgroups { @@ -506,6 +509,10 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named // Annotations if ctrSpec.Annotations != nil { + if len(ctrSpec.Annotations) != 0 { + hostConfig.Annotations = ctrSpec.Annotations + } + hostConfig.ContainerIDFile = ctrSpec.Annotations[define.InspectAnnotationCIDFile] if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue { hostConfig.AutoRemove = true diff --git a/libpod/container_internal.go b/libpod/container_internal.go index a7d07da537..a9c076085a 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -11,8 +11,11 @@ import ( "io/fs" "os" "path/filepath" + "slices" "strconv" "strings" + "sync" + "syscall" "time" metadata "github.com/checkpoint-restore/checkpointctl/lib" @@ -38,6 +41,7 @@ import ( "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idmap" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" @@ -48,7 +52,6 @@ import ( "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -204,7 +207,7 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { if err != nil { return err } - if _, err = os.Stat(oomFilePath); err == nil { + if err = fileutils.Exists(oomFilePath); err == nil { c.state.OOMKilled = true } @@ -271,7 +274,9 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err } if c.config.HealthCheckConfig != nil { - if err := c.removeTransientFiles(ctx, c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed); err != nil { + if err := c.removeTransientFiles(ctx, + c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed, + c.state.HCUnitName); err != nil { return false, err } } @@ -301,29 +306,23 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err } }() - // Now this is a bit of a mess, normally we try to reuse the netns but if a userns - // is used this is not possible as it must be owned by the userns which is created - // by the oci runtime. Thus we need to teardown the netns so that the runtime - // creates the users+netns and then we setup in completeNetworkSetup() again. - if c.config.PostConfigureNetNS { - if err := c.cleanupNetwork(); err != nil { - return false, err - } + // Always teardown the network, trying to reuse the netns has caused + // a significant amount of bugs in this code here. It also never worked + // for containers with user namespaces. So once and for all simplify this + // by never reusing the netns. Originally this was done to have a faster + // restart of containers but with netavark now we are much faster so it + // shouldn't be that noticeable in practice. 
It also makes more sense to + // reconfigure the netns as it is likely that the container exited due + // some broken network state in which case reusing would just cause more + // harm than good. + if err := c.cleanupNetwork(); err != nil { + return false, err } if err := c.prepare(); err != nil { return false, err } - // only do this if the container is not in a userns, if we are the cleanupNetwork() - // was called above and a proper network setup is needed which is part of the init() below. - if !c.config.PostConfigureNetNS { - // set up slirp4netns again because slirp4netns will die when conmon exits - if err := c.setupRootlessNetwork(); err != nil { - return false, err - } - } - if c.state.State == define.ContainerStateStopped { // Reinitialize the container if we need to if err := c.reinit(ctx, true); err != nil { @@ -335,10 +334,10 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err return false, err } } - if err := c.start(ctx); err != nil { + if err := c.start(); err != nil { return false, err } - return true, nil + return true, c.waitForHealthy(ctx) } // Ensure that the container is in a specific state or state. @@ -547,16 +546,6 @@ func (c *Container) setupStorage(ctx context.Context) error { c.config.StaticDir = containerInfo.Dir c.state.RunDir = containerInfo.RunDir - if len(c.config.IDMappings.UIDMap) != 0 || len(c.config.IDMappings.GIDMap) != 0 { - if err := idtools.SafeChown(containerInfo.RunDir, c.RootUID(), c.RootGID()); err != nil { - return err - } - - if err := idtools.SafeChown(containerInfo.Dir, c.RootUID(), c.RootGID()); err != nil { - return err - } - } - // Set the default Entrypoint and Command if containerInfo.Config != nil { // Set CMD in the container to the default configuration only if ENTRYPOINT is not set by the user. @@ -666,6 +655,7 @@ func resetContainerState(state *ContainerState) { state.StartupHCPassed = false state.StartupHCSuccessCount = 0 state.StartupHCFailureCount = 0 + state.HCUnitName = "" state.NetNS = "" state.NetworkStatus = nil } @@ -1212,7 +1202,7 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error { // Initialize (if necessary) and start a container // Performs all necessary steps to start a container that is not running -// Does not lock or check validity +// Does not lock or check validity, requires to run on the same thread that holds the lock for the container. func (c *Container) initAndStart(ctx context.Context) (retErr error) { // If we are ContainerStateUnknown, throw an error if c.state.State == define.ContainerStateUnknown { @@ -1257,11 +1247,14 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) { } // Now start the container - return c.start(ctx) + if err := c.start(); err != nil { + return err + } + return c.waitForHealthy(ctx) } // Internal, non-locking function to start a container -func (c *Container) start(ctx context.Context) error { +func (c *Container) start() error { if c.config.Spec.Process != nil { logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args) } @@ -1302,10 +1295,14 @@ func (c *Container) start(ctx context.Context) error { c.newContainerEvent(events.Start) - if err := c.save(); err != nil { - return err - } + return c.save() +} +// waitForHealthy, when sdNotifyMode == SdNotifyModeHealthy, waits up to the DefaultWaitInterval +// for the container to get into the healthy state and reports the status to the notify socket. 
+// The function unlocks the container lock, so it must be called from the same thread that locks +// the container. +func (c *Container) waitForHealthy(ctx context.Context) error { if c.config.SdNotifyMode != define.SdNotifyModeHealthy { return nil } @@ -1319,6 +1316,9 @@ func (c *Container) start(ctx context.Context) error { } if _, err := c.WaitForConditionWithInterval(ctx, DefaultWaitInterval, define.HealthCheckHealthy); err != nil { + if errors.Is(err, define.ErrNoSuchCtr) { + return nil + } return err } @@ -1330,10 +1330,8 @@ func (c *Container) start(ctx context.Context) error { return nil } -// Internal, non-locking function to stop container -func (c *Container) stop(timeout uint) error { - logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout) - +// Whether a container should use `all` when stopping +func (c *Container) stopWithAll() (bool, error) { // If the container is running in a PID Namespace, then killing the // primary pid is enough to kill the container. If it is not running in // a pid namespace then the OCI Runtime needs to kill ALL processes in @@ -1349,7 +1347,7 @@ func (c *Container) stop(timeout uint) error { // Only do this check if we need to unified, err := cgroups.IsCgroup2UnifiedMode() if err != nil { - return err + return false, err } if !unified { all = false @@ -1357,6 +1355,18 @@ func (c *Container) stop(timeout uint) error { } } + return all, nil +} + +// Internal, non-locking function to stop container +func (c *Container) stop(timeout uint) error { + logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout) + + all, err := c.stopWithAll() + if err != nil { + return err + } + // OK, the following code looks a bit weird but we have to make sure we can stop // containers with the restart policy always, to do this we have to set // StoppedByUser even when there is nothing to stop right now. This is due to the @@ -1446,6 +1456,58 @@ func (c *Container) waitForConmonToExitAndSave() error { return err } + // If we are still ContainerStateStopping, conmon exited without + // creating an exit file. Let's try and handle that here. + if c.state.State == define.ContainerStateStopping { + // Is container PID1 still alive? + if err := unix.Kill(c.state.PID, 0); err == nil { + // We have a runaway container, unmanaged by + // Conmon. Invoke OCI runtime stop. + // Use 0 timeout for immediate SIGKILL as things + // have gone seriously wrong. + // Ignore the error from stopWithAll, it's just + // a cgroup check - more important that we + // continue. + // If we wanted to be really fancy here, we + // could open a pidfd on container PID1 before + // this to get the real exit code... But I'm not + // that dedicated. + all, _ := c.stopWithAll() + if err := c.ociRuntime.StopContainer(c, 0, all); err != nil { + logrus.Errorf("Error stopping container %s after Conmon exited prematurely: %v", c.ID(), err) + } + } + + // Conmon is dead. Handle it. 
+ c.state.State = define.ContainerStateStopped + c.state.PID = 0 + c.state.ConmonPID = 0 + c.state.FinishedTime = time.Now() + c.state.ExitCode = -1 + c.state.Exited = true + + c.state.Error = "conmon died without writing exit file, container exit code could not be retrieved" + + c.newContainerExitedEvent(c.state.ExitCode) + + if err := c.save(); err != nil { + logrus.Errorf("Error saving container %s state after Conmon exited prematurely: %v", c.ID(), err) + } + + if err := c.runtime.state.AddContainerExitCode(c.ID(), c.state.ExitCode); err != nil { + logrus.Errorf("Error saving container %s exit code after Conmon exited prematurely: %v", c.ID(), err) + } + + // No Conmon alive to trigger cleanup, and the calls in + // regular Podman are conditional on no errors. + // Need to clean up manually. + if err := c.cleanup(context.Background()); err != nil { + logrus.Errorf("Error cleaning up container %s after Conmon exited prematurely: %v", c.ID(), err) + } + + return fmt.Errorf("container %s conmon exited prematurely, exit code could not be retrieved: %w", c.ID(), define.ErrInternal) + } + return c.save() } @@ -1508,6 +1570,7 @@ func (c *Container) unpause() error { } // Internal, non-locking function to restart a container +// It requires to run on the same thread that holds the lock. func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retErr error) { if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) { return fmt.Errorf("unable to restart a container in a paused or unknown state: %w", define.ErrCtrStateInvalid) @@ -1516,33 +1579,17 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retEr c.newContainerEvent(events.Restart) if c.state.State == define.ContainerStateRunning { - conmonPID := c.state.ConmonPID if err := c.stop(timeout); err != nil { return err } if c.config.HealthCheckConfig != nil { - if err := c.removeTransientFiles(context.Background(), c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed); err != nil { + if err := c.removeTransientFiles(context.Background(), + c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed, + c.state.HCUnitName); err != nil { logrus.Error(err.Error()) } } - // Old versions of conmon have a bug where they create the exit file before - // closing open file descriptors causing a race condition when restarting - // containers with open ports since we cannot bind the ports as they're not - // yet closed by conmon. - // - // Killing the old conmon PID is ~okay since it forces the FDs of old conmons - // to be closed, while it's a NOP for newer versions which should have - // exited already. - if conmonPID != 0 { - // Ignore errors from FindProcess() as conmon could already have exited. 
- p, err := os.FindProcess(conmonPID) - if p != nil && err == nil { - if err = p.Kill(); err != nil { - logrus.Debugf("error killing conmon process: %v", err) - } - } - } // Ensure we tear down the container network so it will be // recreated - otherwise, behavior of restart differs from stop // and start @@ -1573,7 +1620,10 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retEr return err } } - return c.start(ctx) + if err := c.start(); err != nil { + return err + } + return c.waitForHealthy(ctx) } // mountStorage sets up the container's root filesystem @@ -1847,6 +1897,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) // Set NeedsCopyUp to false since we are about to do first copy // Do not copy second time. vol.state.NeedsCopyUp = false + vol.state.CopiedUp = true if err := vol.save(); err != nil { return nil, err } @@ -1992,7 +2043,9 @@ func (c *Container) cleanup(ctx context.Context) error { // Remove healthcheck unit/timer file if it execs if c.config.HealthCheckConfig != nil { - if err := c.removeTransientFiles(ctx, c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed); err != nil { + if err := c.removeTransientFiles(ctx, + c.config.StartupHealthCheckConfig != nil && !c.state.StartupHCPassed, + c.state.HCUnitName); err != nil { logrus.Errorf("Removing timer for container %s healthcheck: %v", c.ID(), err) } } @@ -2005,7 +2058,7 @@ func (c *Container) cleanup(ctx context.Context) error { // cleanup host entry if it is shared if c.config.NetNsCtr != "" { if hoststFile, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { - if _, err := os.Stat(hoststFile); err == nil { + if err := fileutils.Exists(hoststFile); err == nil { // we cannot use the dependency container lock due ABBA deadlocks if lock, err := lockfile.GetLockFile(hoststFile); err == nil { lock.Lock() @@ -2226,7 +2279,7 @@ func (c *Container) saveSpec(spec *spec.Spec) error { // Cannot guarantee some things, e.g. network namespaces, have the same // paths jsonPath := filepath.Join(c.bundlePath(), "config.json") - if _, err := os.Stat(jsonPath); err != nil { + if err := fileutils.Exists(jsonPath); err != nil { if !os.IsNotExist(err) { return fmt.Errorf("doing stat on container %s spec: %w", c.ID(), err) } @@ -2311,6 +2364,75 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s return allHooks, nil } +// getRootPathForOCI returns the root path to use for the OCI runtime. +// If the current user is mapped in the container user namespace, then it returns +// the container's mountpoint directly from the storage. +// Otherwise, it returns an intermediate mountpoint that is accessible to anyone. +func (c *Container) getRootPathForOCI() (string, error) { + if hasCurrentUserMapped(c) { + return c.state.Mountpoint, nil + } + return c.getIntermediateMountpointUser() +} + +var ( + intermediateMountPoint string + intermediateMountPointErr error + intermediateMountPointSync sync.Mutex +) + +// getIntermediateMountpointUser returns a path that is accessible to everyone. It must be on TMPDIR since +// the runroot/tmpdir used by libpod are accessible only to the owner. +// To avoid TOCTOU issues, the path must be owned by the current user's UID and GID. +// The path can be used by different containers, so a mount must be created only in a private mount namespace. 
+func (c *Container) recreateIntermediateMountpointUser() (string, error) { + uid := os.Geteuid() + gid := os.Getegid() + for i := 0; ; i++ { + tmpDir := os.Getenv("TMPDIR") + if tmpDir == "" { + tmpDir = "/tmp" + } + dir := filepath.Join(tmpDir, fmt.Sprintf("intermediate-mountpoint-%d.%d", rootless.GetRootlessUID(), i)) + err := os.Mkdir(dir, 0755) + if err != nil { + if !errors.Is(err, os.ErrExist) { + return "", err + } + st, err2 := os.Stat(dir) + if err2 != nil { + return "", err + } + sys := st.Sys().(*syscall.Stat_t) + if !st.IsDir() || sys.Uid != uint32(uid) || sys.Gid != uint32(gid) { + continue + } + } + return dir, nil + } +} + +// getIntermediateMountpointUser returns a path that is accessible to everyone. +// To avoid TOCTOU issues, the path must be owned by the current user's UID and GID. +// The path can be used by different containers, so a mount must be created only in a private mount namespace. +func (c *Container) getIntermediateMountpointUser() (string, error) { + intermediateMountPointSync.Lock() + defer intermediateMountPointSync.Unlock() + + if intermediateMountPoint == "" || fileutils.Exists(intermediateMountPoint) != nil { + return c.recreateIntermediateMountpointUser() + } + + // update the timestamp to prevent systemd-tmpfiles from removing it + now := time.Now() + if err := os.Chtimes(intermediateMountPoint, now, now); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return c.recreateIntermediateMountpointUser() + } + } + return intermediateMountPoint, intermediateMountPointErr +} + // mount mounts the container's root filesystem func (c *Container) mount() (string, error) { if c.state.State == define.ContainerStateRemoving { @@ -2369,8 +2491,7 @@ func (c *Container) checkReadyForRemoval() error { // canWithPrevious return the stat of the preCheckPoint dir func (c *Container) canWithPrevious() error { - _, err := os.Stat(c.PreCheckPointPath()) - return err + return fileutils.Exists(c.PreCheckPointPath()) } // prepareCheckpointExport writes the config and spec to @@ -2488,7 +2609,7 @@ func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool { return false } -// extractSecretToStorage copies a secret's data from the secrets manager to the container's static dir +// extractSecretToCtrStorage copies a secret's data from the secrets manager to the container's static dir func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error { manager, err := c.runtime.SecretsManager() if err != nil { @@ -2520,11 +2641,77 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error { return nil } -// update calls the ociRuntime update function to modify a cgroup config after container creation -func (c *Container) update(resources *spec.LinuxResources) error { - if err := c.ociRuntime.UpdateContainer(c, resources); err != nil { +// Update a container's resources or restart policy after creation. +// At least one of resources or restartPolicy must not be nil. 
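// The helper below is a compressed, self-contained sketch of the shape the new update path
// takes: validate the optional arguments up front, remember the old values, and roll them
// back if persisting the new configuration fails. The cfg struct, persist callback, and
// policy string are illustrative stand-ins for the real container config and database write.
package example

import (
	"errors"
	"fmt"
)

type cfg struct {
	RestartPolicy  string
	RestartRetries uint
}

var errInvalidArg = errors.New("invalid argument")

func updatePolicy(c *cfg, policy *string, retries *uint, persist func(cfg) error) error {
	if policy == nil {
		if retries != nil {
			return fmt.Errorf("retries require an explicit restart policy: %w", errInvalidArg)
		}
		return nil
	}
	if retries != nil && *policy != "on-failure" {
		return fmt.Errorf("retries only make sense with the on-failure policy: %w", errInvalidArg)
	}

	old := *c
	c.RestartPolicy = *policy
	c.RestartRetries = 0
	if retries != nil {
		c.RestartRetries = *retries
	}
	if err := persist(*c); err != nil {
		// The write failed; keep the in-memory view consistent with what is on disk.
		*c = old
		return err
	}
	return nil
}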
+func (c *Container) update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error { + if resources == nil && restartPolicy == nil { + return fmt.Errorf("must provide at least one of resources and restartPolicy to update a container: %w", define.ErrInvalidArg) + } + if restartRetries != nil && restartPolicy == nil { + return fmt.Errorf("must provide restart policy if updating restart retries: %w", define.ErrInvalidArg) + } + + oldResources := c.config.Spec.Linux.Resources + oldRestart := c.config.RestartPolicy + oldRetries := c.config.RestartRetries + + if restartPolicy != nil { + if err := define.ValidateRestartPolicy(*restartPolicy); err != nil { + return err + } + + if restartRetries != nil { + if *restartPolicy != define.RestartPolicyOnFailure { + return fmt.Errorf("cannot set restart policy retries unless policy is on-failure: %w", define.ErrInvalidArg) + } + } + + c.config.RestartPolicy = *restartPolicy + if restartRetries != nil { + c.config.RestartRetries = *restartRetries + } else { + c.config.RestartRetries = 0 + } + } + + if resources != nil { + if c.config.Spec.Linux == nil { + c.config.Spec.Linux = new(spec.Linux) + } + c.config.Spec.Linux.Resources = resources + } + + if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil { + // Assume DB write failed, revert to old resources block + c.config.Spec.Linux.Resources = oldResources + c.config.RestartPolicy = oldRestart + c.config.RestartRetries = oldRetries return err } + + if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused) && resources != nil { + // So `podman inspect` on running containers sources its OCI spec from disk. + // To keep inspect accurate we need to update the on-disk OCI spec. 
+ onDiskSpec, err := c.specFromState() + if err != nil { + return fmt.Errorf("retrieving on-disk OCI spec to update: %w", err) + } + if onDiskSpec.Linux == nil { + onDiskSpec.Linux = new(spec.Linux) + } + onDiskSpec.Linux.Resources = resources + if err := c.saveSpec(onDiskSpec); err != nil { + logrus.Errorf("Unable to update container %s OCI spec - `podman inspect` may not be accurate until container is restarted: %v", c.ID(), err) + } + + if err := c.ociRuntime.UpdateContainer(c, resources); err != nil { + return err + } + } + logrus.Debugf("updated container %s", c.ID()) + + c.newContainerEvent(events.Update) + return nil } diff --git a/libpod/container_internal_common.go b/libpod/container_internal_common.go index 02820ecbfa..47f2401c4c 100644 --- a/libpod/container_internal_common.go +++ b/libpod/container_internal_common.go @@ -7,12 +7,15 @@ import ( "errors" "fmt" "io" + "io/fs" "math" + "net" "os" "os/user" "path" "path/filepath" "runtime" + "slices" "strconv" "strings" "syscall" @@ -43,8 +46,10 @@ import ( "github.com/containers/podman/v5/pkg/util" "github.com/containers/podman/v5/version" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" stypes "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" runcuser "github.com/moby/sys/user" @@ -53,7 +58,6 @@ import ( "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" cdi "tags.cncf.io/container-device-interface/pkg/cdi" ) @@ -455,11 +459,23 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc return nil, nil, fmt.Errorf("failed to create TempDir in the %s directory: %w", c.config.StaticDir, err) } + imagePath := mountPoint + if volume.SubPath != "" { + safeMount, err := c.safeMountSubPath(mountPoint, volume.SubPath) + if err != nil { + return nil, nil, err + } + + safeMounts = append(safeMounts, safeMount) + + imagePath = safeMount.mountPoint + } + var overlayMount spec.Mount if volume.ReadWrite { - overlayMount, err = overlay.Mount(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions()) + overlayMount, err = overlay.Mount(contentDir, imagePath, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions()) } else { - overlayMount, err = overlay.MountReadOnly(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions()) + overlayMount, err = overlay.MountReadOnly(contentDir, imagePath, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions()) } if err != nil { return nil, nil, fmt.Errorf("creating overlay mount for image %q failed: %w", volume.Source, err) @@ -549,9 +565,22 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc return nil, nil, err } - g.SetRootPath(c.state.Mountpoint) + rootPath, err := c.getRootPathForOCI() + if err != nil { + return nil, nil, err + } + g.SetRootPath(rootPath) g.AddAnnotation("org.opencontainers.image.stopSignal", strconv.FormatUint(uint64(c.config.StopSignal), 10)) + if c.config.StopSignal != 0 { + g.AddAnnotation("org.systemd.property.KillSignal", strconv.FormatUint(uint64(c.config.StopSignal), 10)) + } + + if c.config.StopTimeout != 0 { + annotation := fmt.Sprintf("uint64 %d", c.config.StopTimeout*1000000) // sec 
to usec + g.AddAnnotation("org.systemd.property.TimeoutStopUSec", annotation) + } + if _, exists := g.Config.Annotations[annotations.ContainerManager]; !exists { g.AddAnnotation(annotations.ContainerManager, annotations.ContainerManagerLibpod) } @@ -562,14 +591,16 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc // Warning: CDI may alter g.Config in place. if len(c.config.CDIDevices) > 0 { - registry := cdi.GetRegistry( + registry, err := cdi.NewCache( cdi.WithAutoRefresh(false), ) + if err != nil { + return nil, nil, fmt.Errorf("creating CDI registry: %w", err) + } if err := registry.Refresh(); err != nil { logrus.Debugf("The following error was triggered when refreshing the CDI registry: %v", err) } - _, err := registry.InjectDevices(g.Config, c.config.CDIDevices...) - if err != nil { + if _, err := registry.InjectDevices(g.Config, c.config.CDIDevices...); err != nil { return nil, nil, fmt.Errorf("setting up CDI devices: %w", err) } } @@ -632,14 +663,15 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc nofileSet := false nprocSet := false isRootless := rootless.IsRootless() - if isRootless { - if g.Config.Process != nil && g.Config.Process.OOMScoreAdj != nil { - var err error - *g.Config.Process.OOMScoreAdj, err = maybeClampOOMScoreAdj(*g.Config.Process.OOMScoreAdj) - if err != nil { - return nil, nil, err - } + isRunningInUserNs := unshare.IsRootless() + if isRunningInUserNs && g.Config.Process != nil && g.Config.Process.OOMScoreAdj != nil { + var err error + *g.Config.Process.OOMScoreAdj, err = maybeClampOOMScoreAdj(*g.Config.Process.OOMScoreAdj) + if err != nil { + return nil, nil, err } + } + if isRootless { for _, rlimit := range c.config.Spec.Process.Rlimits { if rlimit.Type == "RLIMIT_NOFILE" { nofileSet = true @@ -719,7 +751,7 @@ func (c *Container) isWorkDirSymlink(resolvedPath string) bool { } if resolvedSymlinkWorkdir != "" { resolvedPath = resolvedSymlinkWorkdir - _, err := os.Stat(resolvedSymlinkWorkdir) + err := fileutils.Exists(resolvedSymlinkWorkdir) if err == nil { // Symlink resolved successfully and resolved path exists on container, // this is a valid use-case so return nil. @@ -1417,7 +1449,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Let's try to stat() CRIU's inventory file. If it does not exist, it makes // no sense to try a restore. This is a minimal check if a checkpoint exists. 
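// One detail in the generateSpec hunk above is easy to miss: the systemd property
// annotations encode the property type in the value, and the stop timeout is converted
// from seconds to microseconds before being handed to systemd. A tiny sketch of that
// conversion; the function name is illustrative.
package example

import "fmt"

// timeoutStopUSec renders a stop timeout (in seconds) the way a systemd uint64
// property expects it: a type tag followed by the value in microseconds.
func timeoutStopUSec(seconds uint) string {
	return fmt.Sprintf("uint64 %d", uint64(seconds)*1_000_000)
}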
- if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) { + if err := fileutils.Exists(filepath.Join(c.CheckpointPath(), "inventory.img")); errors.Is(err, fs.ErrNotExist) { return nil, 0, fmt.Errorf("a complete checkpoint for this container cannot be found, cannot restore: %w", err) } @@ -1627,7 +1659,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Restore /dev/shm content if c.config.ShmDir != "" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { shmDirTarFileFullPath := filepath.Join(c.bundlePath(), metadata.DevShmCheckpointTar) - if _, err := os.Stat(shmDirTarFileFullPath); err != nil { + if err := fileutils.Exists(shmDirTarFileFullPath); err != nil { logrus.Debug("Container checkpoint doesn't contain dev/shm: ", err.Error()) } else { shmDirTarFile, err := os.Open(shmDirTarFileFullPath) @@ -1693,6 +1725,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti } } + // setup hosts/resolv.conf files + // Note this should normally be called after the container is created in the runtime but before it is started. + // However restore starts the container right away. This means that if we do the call afterwards there is a + // short interval where the file is still empty. Thus I decided to call it before which makes it not working + // with PostConfigureNetNS (userns) but as this does not work anyway today so I don't see it as problem. + if err := c.completeNetworkSetup(); err != nil { + return nil, 0, fmt.Errorf("complete network setup: %w", err) + } + runtimeRestoreDuration, err = c.ociRuntime.CreateContainer(c, &options) if err != nil { return nil, 0, err @@ -1815,10 +1856,6 @@ func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error // Make standard bind mounts to include in the container func (c *Container) makeBindMounts() error { - if err := idtools.SafeChown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil { - return fmt.Errorf("cannot chown run directory: %w", err) - } - if c.state.BindMounts == nil { c.state.BindMounts = make(map[string]string) } @@ -1898,15 +1935,6 @@ func (c *Container) makeBindMounts() error { return fmt.Errorf("assigning mounts to container %s: %w", c.ID(), err) } } - - if !hasCurrentUserMapped(c) { - if err := makeAccessible(resolvPath, c.RootUID(), c.RootGID()); err != nil { - return err - } - if err := makeAccessible(hostsPath, c.RootUID(), c.RootGID()); err != nil { - return err - } - } } else { if !c.config.UseImageResolvConf { if err := c.createResolvConf(); err != nil { @@ -2114,8 +2142,8 @@ func (c *Container) addResolvConf() error { // first add the nameservers from the networks status nameservers = networkNameServers - // slirp4netns has a built in DNS forwarder. - nameservers = c.addSlirp4netnsDNS(nameservers) + // pasta and slirp4netns have a built in DNS forwarder. + nameservers = c.addSpecialDNS(nameservers) } // Set DNS search domains @@ -2165,6 +2193,10 @@ func (c *Container) checkForIPv6(netStatus map[string]types.StatusBlock) bool { } } + if c.pastaResult != nil { + return c.pastaResult.IPv6 + } + return c.isSlirp4netnsIPv6() } @@ -2223,11 +2255,10 @@ func (c *Container) getHostsEntries() (etchosts.HostEntries, error) { case c.config.NetMode.IsBridge(): entries = etchosts.GetNetworkHostEntries(c.state.NetworkStatus, names...) 
case c.config.NetMode.IsPasta(): - ip, err := getPastaIP(c.state) - if err != nil { - return nil, err + // this should never be the case but check just to be sure and not panic + if len(c.pastaResult.IPAddresses) > 0 { + entries = etchosts.HostEntries{{IP: c.pastaResult.IPAddresses[0].String(), Names: names}} } - entries = etchosts.HostEntries{{IP: ip.String(), Names: names}} case c.config.NetMode.IsSlirp4netns(): ip, err := getSlirp4netnsIP(c.slirp4netnsSubnet) if err != nil { @@ -2274,12 +2305,27 @@ func (c *Container) addHosts() error { return err } + var exclude []net.IP + if c.pastaResult != nil { + exclude = c.pastaResult.IPAddresses + } else if c.config.NetMode.IsBridge() { + // When running rootless we have to check the rootless netns ip addresses + // to not assign a ip that is already used in the rootless netns as it would + // not be routed to the host. + // https://github.com/containers/podman/issues/22653 + info, err := c.runtime.network.RootlessNetnsInfo() + if err == nil { + exclude = info.IPAddresses + } + } + return etchosts.New(&etchosts.Params{ - BaseFile: baseHostFile, - ExtraHosts: c.config.HostAdd, - ContainerIPs: containerIPsEntries, - HostContainersInternalIP: etchosts.GetHostContainersInternalIP(c.runtime.config, c.state.NetworkStatus, c.runtime.network), - TargetFile: targetFile, + BaseFile: baseHostFile, + ExtraHosts: c.config.HostAdd, + ContainerIPs: containerIPsEntries, + HostContainersInternalIP: etchosts.GetHostContainersInternalIPExcluding( + c.runtime.config, c.state.NetworkStatus, c.runtime.network, exclude), + TargetFile: targetFile, }) } @@ -2664,13 +2710,13 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { // do anything more. if needPasswd { passwdPath := filepath.Join(c.config.StaticDir, "passwd") - if _, err := os.Stat(passwdPath); err == nil { + if err := fileutils.Exists(passwdPath); err == nil { needPasswd = false } } if needGroup { groupPath := filepath.Join(c.config.StaticDir, "group") - if _, err := os.Stat(groupPath); err == nil { + if err := fileutils.Exists(groupPath); err == nil { needGroup = false } } @@ -2789,7 +2835,7 @@ func (c *Container) cleanupOverlayMounts() error { // Creates and mounts an empty dir to mount secrets into, if it does not already exist func (c *Container) createSecretMountDir(runPath string) error { src := filepath.Join(c.state.RunDir, "/run/secrets") - _, err := os.Stat(src) + err := fileutils.Exists(src) if os.IsNotExist(err) { if err := umask.MkdirAllIgnoreUmask(src, os.FileMode(0o755)); err != nil { return err @@ -2807,6 +2853,15 @@ func (c *Container) createSecretMountDir(runPath string) error { return err } +func hasIdmapOption(options []string) bool { + for _, o := range options { + if o == "idmap" || strings.HasPrefix(o, "idmap=") { + return true + } + } + return false +} + // Fix ownership and permissions of the specified volume if necessary. func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { vol, err := c.runtime.state.Volume(v.Name) @@ -2822,15 +2877,31 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { return err } + // If the volume is not empty, and it is not the first copy-up event - + // we should not do a chown. + if vol.state.NeedsChown && !vol.state.CopiedUp { + contents, err := os.ReadDir(vol.mountPoint()) + if err != nil { + return fmt.Errorf("reading contents of volume %q: %w", vol.Name(), err) + } + // Not empty, do nothing and unset NeedsChown. 
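// The check that follows implements a simple rule: only the first copy-up may trigger a
// chown, and a volume that already has content keeps its existing ownership. A
// self-contained sketch of that decision, with os.ReadDir standing in for reading the
// volume mountpoint; the type and field names here are illustrative only.
package example

import "os"

type volState struct {
	NeedsChown bool
	CopiedUp   bool
}

// shouldChown reports whether mountPoint should be chowned for the container user.
func shouldChown(v *volState, mountPoint string) (bool, error) {
	if !v.NeedsChown {
		return false, nil
	}
	if v.CopiedUp {
		// First copy-up: ownership must follow the container user.
		return true, nil
	}
	entries, err := os.ReadDir(mountPoint)
	if err != nil {
		return false, err
	}
	if len(entries) > 0 {
		// Existing content wins; never chown this volume again.
		v.NeedsChown = false
		return false, nil
	}
	return true, nil
}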
+ if len(contents) > 0 { + vol.state.NeedsChown = false + if err := vol.save(); err != nil { + return fmt.Errorf("saving volume %q state: %w", vol.Name(), err) + } + return nil + } + } + // Volumes owned by a volume driver are not chowned - we don't want to // mess with a mount not managed by us. if vol.state.NeedsChown && (!vol.UsesVolumeDriver() && vol.config.Driver != "image") { - vol.state.NeedsChown = false - uid := int(c.config.Spec.Process.User.UID) gid := int(c.config.Spec.Process.User.GID) - if c.config.IDMappings.UIDMap != nil { + // if the volume is mounted with "idmap", leave the IDs in from the current environment. + if c.config.IDMappings.UIDMap != nil && !hasIdmapOption(v.Options) { p := idtools.IDPair{ UID: uid, GID: gid, @@ -2844,6 +2915,10 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { gid = newPair.GID } + if vol.state.CopiedUp { + vol.state.NeedsChown = false + } + vol.state.CopiedUp = false vol.state.UIDChowned = uid vol.state.GIDChowned = gid @@ -2860,8 +2935,13 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { return err } - // Make sure the new volume matches the permissions of the target directory. + // Make sure the new volume matches the permissions of the target directory unless 'U' is + // provided (since the volume was already chowned in this case). // https://github.com/containers/podman/issues/10188 + if slices.Contains(v.Options, "U") { + return nil + } + st, err := os.Lstat(filepath.Join(c.state.Mountpoint, v.Dest)) if err == nil { if stat, ok := st.Sys().(*syscall.Stat_t); ok { @@ -2883,6 +2963,16 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { if err := idtools.SafeLchown(mountPoint, uid, gid); err != nil { return err } + + // UID/GID 0 are sticky - if we chown to root, + // we stop chowning thereafter. 
+ if uid == 0 && gid == 0 && vol.state.NeedsChown { + vol.state.NeedsChown = false + + if err := vol.save(); err != nil { + return fmt.Errorf("saving volume %q state to database: %w", vol.Name(), err) + } + } } if err := os.Chmod(mountPoint, st.Mode()); err != nil { return err diff --git a/libpod/container_internal_freebsd.go b/libpod/container_internal_freebsd.go index 0f8bca0a3d..995d519299 100644 --- a/libpod/container_internal_freebsd.go +++ b/libpod/container_internal_freebsd.go @@ -279,7 +279,7 @@ func (c *Container) setCgroupsPath(g *generate.Generator) error { return nil } -func (c *Container) addSlirp4netnsDNS(nameservers []string) []string { +func (c *Container) addSpecialDNS(nameservers []string) []string { return nameservers } diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 9e279116ca..a7b16a7f0a 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -71,6 +71,14 @@ func (c *Container) prepare() error { go func() { defer wg.Done() + if c.state.State == define.ContainerStateStopped { + // networking should not be reused after a stop + if err := c.cleanupNetwork(); err != nil { + createNetNSErr = err + return + } + } + // Set up network namespace if not already set up noNetNS := c.state.NetNS == "" if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS { @@ -413,27 +421,6 @@ func (c *Container) getOCICgroupPath() (string, error) { } } -// If the container is rootless, set up the slirp4netns network -func (c *Container) setupRootlessNetwork() error { - // set up slirp4netns again because slirp4netns will die when conmon exits - if c.config.NetMode.IsSlirp4netns() { - err := c.runtime.setupSlirp4netns(c, c.state.NetNS) - if err != nil { - return err - } - } - - // set up rootlesskit port forwarder again since it dies when conmon exits - // we use rootlesskit port forwarder only as rootless and when bridge network is used - if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 { - err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS, c.state.NetworkStatus) - if err != nil { - return err - } - } - return nil -} - func openDirectory(path string) (fd int, err error) { return unix.Open(path, unix.O_RDONLY|unix.O_PATH, 0) } @@ -616,7 +603,12 @@ func (c *Container) setCgroupsPath(g *generate.Generator) error { return nil } -func (c *Container) addSlirp4netnsDNS(nameservers []string) []string { +// addSpecialDNS adds special dns servers for slirp4netns and pasta +func (c *Container) addSpecialDNS(nameservers []string) []string { + if c.pastaResult != nil { + nameservers = append(nameservers, c.pastaResult.DNSForwardIPs...) + } + // slirp4netns has a built in DNS forwarder. 
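// addSpecialDNS collects resolver addresses from whichever user-mode networking backend is
// active, on top of whatever the network stack already provided. A reduced sketch of that
// aggregation; the pastaResult type and the slirpDNS callback are hypothetical placeholders
// for the real pasta setup result and slirp4netns DNS lookup.
package example

type pastaResult struct{ DNSForwardIPs []string }

func specialDNS(base []string, pasta *pastaResult, slirpDNS func() ([]string, error)) []string {
	out := append([]string{}, base...)
	if pasta != nil {
		out = append(out, pasta.DNSForwardIPs...)
	}
	if slirpDNS != nil {
		if ips, err := slirpDNS(); err == nil {
			out = append(out, ips...)
		}
	}
	return out
}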
if c.config.NetMode.IsSlirp4netns() { slirp4netnsDNS, err := slirp4netns.GetDNS(c.slirp4netnsSubnet) @@ -680,7 +672,7 @@ func (c *Container) makePlatformBindMounts() error { // Make /etc/hostname // This should never change, so no need to recreate if it exists if _, ok := c.state.BindMounts["/etc/hostname"]; !ok { - hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname()) + hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname()+"\n") if err != nil { return fmt.Errorf("creating hostname file for container %s: %w", c.ID(), err) } diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go index 682aa93284..068cd284ea 100644 --- a/libpod/container_path_resolution.go +++ b/libpod/container_path_resolution.go @@ -26,7 +26,7 @@ func (c *Container) pathAbs(path string) string { return path } -// resolveContainerPaths resolves the container's mount point and the container +// resolvePath resolves the container's mount point and the container // path as specified by the user. Both may resolve to paths outside of the // container's mount point when the container path hits a volume or bind mount. // @@ -153,7 +153,7 @@ func isPathOnVolume(c *Container, containerPath string) bool { return false } -// findBindMounts checks if the specified containerPath matches the destination +// findBindMount checks if the specified containerPath matches the destination // path of a Mount. Returns a matching Mount or nil. func findBindMount(c *Container, containerPath string) *specs.Mount { cleanedPath := filepath.Clean(containerPath) diff --git a/libpod/container_top_linux.c b/libpod/container_top_linux.c index a7192c54c2..0988a7a14f 100644 --- a/libpod/container_top_linux.c +++ b/libpod/container_top_linux.c @@ -3,6 +3,8 @@ #define _GNU_SOURCE #include +#include +#include #include #include #include @@ -11,6 +13,7 @@ /* keep special_exit_code in sync with container_top_linux.go */ int special_exit_code = 255; +int join_userns = 0; char **argv = NULL; void @@ -33,6 +36,12 @@ set_argv (int pos, char *arg) argv[pos] = arg; } +void +set_userns () +{ + join_userns = 1; +} + /* We use cgo code here so we can fork then exec separately, this is done so we can mount proc after the fork because the pid namespace is @@ -64,6 +73,23 @@ fork_exec_ps () fprintf (stderr, "mount proc: %m"); exit (special_exit_code); } + if (join_userns) + { + // join the userns to make sure uid mapping match + // we are already part of the pidns so so pid 1 is the main container process + r = open ("/proc/1/ns/user", O_CLOEXEC | O_RDONLY); + if (r < 0) + { + fprintf (stderr, "open /proc/1/ns/user: %m"); + exit (special_exit_code); + } + if ((status = setns (r, CLONE_NEWUSER)) < 0) + { + fprintf (stderr, "setns NEWUSER: %m"); + exit (special_exit_code); + } + } + /* use execve to unset all env vars, we do not want to leak anything into the container */ execve (argv[0], argv, NULL); fprintf (stderr, "execve: %m"); diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index c1d78cd4e1..0b85a5a705 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -11,6 +11,7 @@ import ( "os/exec" "path/filepath" "runtime" + "slices" "strconv" "strings" "syscall" @@ -22,7 +23,6 @@ import ( "github.com/containers/storage/pkg/reexec" "github.com/google/shlex" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -31,6 +31,7 @@ import ( void fork_exec_ps(); void create_argv(int len); void set_argv(int pos, char *arg); +void set_userns(); 
*/ import "C" @@ -56,13 +57,13 @@ func podmanTopMain() { os.Exit(0) } -// podmanTopInner os.Args = {command name} {pid} {psPath} [args...] +// podmanTopInner os.Args = {command name} {pid} {userns(1/0)} {psPath} [args...] // We are rexxec'd in a new mountns, then we need to set some security settings in order // to safely execute ps in the container pid namespace. Most notably make sure podman and // ps are read only to prevent a process from overwriting it. func podmanTopInner() error { - if len(os.Args) < 3 { - return fmt.Errorf("internal error, need at least two arguments") + if len(os.Args) < 4 { + return fmt.Errorf("internal error, need at least three arguments") } // We have to lock the thread as we a) switch namespace below and b) use PR_SET_PDEATHSIG @@ -84,7 +85,7 @@ func podmanTopInner() error { return fmt.Errorf("make / mount private: %w", err) } - psPath := os.Args[2] + psPath := os.Args[3] // try to mount everything read only if err := unix.MountSetattr(0, "/", unix.AT_RECURSIVE, &unix.MountAttr{ @@ -122,8 +123,13 @@ func podmanTopInner() error { } pidFD.Close() + userns := os.Args[2] + if userns == "1" { + C.set_userns() + } + args := []string{psPath} - args = append(args, os.Args[3:]...) + args = append(args, os.Args[4:]...) C.create_argv(C.int(len(args))) for i, arg := range args { @@ -317,7 +323,14 @@ func (c *Container) execPS(psArgs []string) ([]string, bool, error) { wPipe.Close() return nil, true, err } - args := append([]string{podmanTopCommand, strconv.Itoa(c.state.PID), psPath}, psArgs...) + + // see podmanTopInner() + userns := "0" + if len(c.config.IDMappings.UIDMap) > 0 { + userns = "1" + } + + args := append([]string{podmanTopCommand, strconv.Itoa(c.state.PID), userns, psPath}, psArgs...) cmd := reexec.Command(args...) cmd.SysProcAttr = &syscall.SysProcAttr{ diff --git a/libpod/define/annotations.go b/libpod/define/annotations.go index 3ceac2dfe4..ac1956f56b 100644 --- a/libpod/define/annotations.go +++ b/libpod/define/annotations.go @@ -160,8 +160,11 @@ const ( // the k8s behavior of waiting for the intialDelaySeconds to be over before updating the status KubeHealthCheckAnnotation = "io.podman.annotations.kube.health.check" - // MaxKubeAnnotation is the max length of annotations allowed by Kubernetes. - MaxKubeAnnotation = 63 + // KubeImageAutomountAnnotation + KubeImageAutomountAnnotation = "io.podman.annotations.kube.image.volumes.mount" + + // TotalAnnotationSizeLimitB is the max length of annotations allowed by Kubernetes. + TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB ) // IsReservedAnnotation returns true if the specified value corresponds to an diff --git a/libpod/define/config.go b/libpod/define/config.go index e5729d47ea..cf9fd3812e 100644 --- a/libpod/define/config.go +++ b/libpod/define/config.go @@ -82,6 +82,9 @@ const NoLogging = "none" // PassthroughLogging is the string conmon expects when specifying to use the passthrough driver const PassthroughLogging = "passthrough" +// PassthroughTTYLogging is the string conmon expects when specifying to use the passthrough driver even on a tty. +const PassthroughTTYLogging = "passthrough-tty" + // DefaultRlimitValue is the value set by default for nofile and nproc const RLimitDefaultValue = uint64(1048576) diff --git a/libpod/define/container.go b/libpod/define/container.go index da2441fb9b..ce7605992a 100644 --- a/libpod/define/container.go +++ b/libpod/define/container.go @@ -1,5 +1,9 @@ package define +import ( + "fmt" +) + // Valid restart policy types. 
const ( // RestartPolicyNone indicates that no restart policy has been requested @@ -27,6 +31,16 @@ var RestartPolicyMap = map[string]string{ RestartPolicyUnlessStopped: RestartPolicyUnlessStopped, } +// Validate that the given string is a valid restart policy. +func ValidateRestartPolicy(policy string) error { + switch policy { + case RestartPolicyNone, RestartPolicyNo, RestartPolicyOnFailure, RestartPolicyAlways, RestartPolicyUnlessStopped: + return nil + default: + return fmt.Errorf("%q is not a valid restart policy: %w", policy, ErrInvalidArg) + } +} + // InitContainerTypes const ( // AlwaysInitContainer is an init container that runs on each diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go index dbb7a06c91..240b09d21d 100644 --- a/libpod/define/container_inspect.go +++ b/libpod/define/container_inspect.go @@ -1,9 +1,13 @@ package define import ( + "encoding/json" + "fmt" + "strings" "time" "github.com/containers/image/v5/manifest" + "github.com/containers/podman/v5/pkg/signal" ) type InspectIDMappings struct { @@ -44,7 +48,7 @@ type InspectContainerConfig struct { // Container working directory WorkingDir string `json:"WorkingDir"` // Container entrypoint - Entrypoint string `json:"Entrypoint"` + Entrypoint []string `json:"Entrypoint"` // On-build arguments - presently unused. More of Buildah's domain. OnBuild *string `json:"OnBuild"` // Container labels @@ -52,7 +56,7 @@ type InspectContainerConfig struct { // Container annotations Annotations map[string]string `json:"Annotations"` // Container stop signal - StopSignal uint `json:"StopSignal"` + StopSignal string `json:"StopSignal"` // Configured healthcheck for the container Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` // HealthcheckOnFailureAction defines an action to take once the container turns unhealthy. @@ -85,6 +89,77 @@ type InspectContainerConfig struct { SdNotifyMode string `json:"sdNotifyMode,omitempty"` // SdNotifySocket is the NOTIFY_SOCKET in use by/configured for the container. SdNotifySocket string `json:"sdNotifySocket,omitempty"` + + // V4PodmanCompatMarshal indicates that the json marshaller should + // use the old v4 inspect format to keep API compatibility. 
+	V4PodmanCompatMarshal bool `json:"-"`
+}
+
+// UnmarshalJSON allows compatibility with the podman V4 API
+func (insp *InspectContainerConfig) UnmarshalJSON(data []byte) error {
+	type Alias InspectContainerConfig
+	aux := &struct {
+		Entrypoint interface{} `json:"Entrypoint"`
+		StopSignal interface{} `json:"StopSignal"`
+		*Alias
+	}{
+		Alias: (*Alias)(insp),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	switch entrypoint := aux.Entrypoint.(type) {
+	case string:
+		insp.Entrypoint = strings.Split(entrypoint, " ")
+	case []string:
+		insp.Entrypoint = entrypoint
+	case []interface{}:
+		insp.Entrypoint = []string{}
+		for _, entry := range entrypoint {
+			if str, ok := entry.(string); ok {
+				insp.Entrypoint = append(insp.Entrypoint, str)
+			}
+		}
+	case nil:
+		insp.Entrypoint = []string{}
+	default:
+		return fmt.Errorf("cannot unmarshal Config.Entrypoint of type %T", entrypoint)
+	}
+
+	switch stopsignal := aux.StopSignal.(type) {
+	case string:
+		insp.StopSignal = stopsignal
+	case float64:
+		insp.StopSignal = signal.ToDockerFormat(uint(stopsignal))
+	case nil:
+		break
+	default:
+		return fmt.Errorf("cannot unmarshal Config.StopSignal of type %T", stopsignal)
+	}
+	return nil
+}
+
+func (insp *InspectContainerConfig) MarshalJSON() ([]byte, error) {
+	// the alias is needed, otherwise MarshalJSON would call itself recursively
+	type Alias InspectContainerConfig
+	conf := (*Alias)(insp)
+	if !insp.V4PodmanCompatMarshal {
+		return json.Marshal(conf)
+	}
+
+	type v4InspectContainerConfig struct {
+		Entrypoint string `json:"Entrypoint"`
+		StopSignal uint   `json:"StopSignal"`
+		*Alias
+	}
+	stopSignal, _ := signal.ParseSignal(insp.StopSignal)
+	newConf := &v4InspectContainerConfig{
+		Entrypoint: strings.Join(insp.Entrypoint, " "),
+		StopSignal: uint(stopSignal),
+		Alias:      conf,
+	}
+	return json.Marshal(newConf)
 }
 // InspectRestartPolicy holds information about the container's restart policy.
@@ -315,6 +390,9 @@ type InspectContainerHostConfig struct {
 	// It is not handled directly within libpod and is stored in an
 	// annotation.
 	AutoRemove bool `json:"AutoRemove"`
+	// Annotations are provided to the runtime when the container is
+	// started.
+	Annotations map[string]string `json:"Annotations"`
 	// VolumeDriver is presently unused and is retained for Docker
 	// compatibility.
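The `type Alias` construction above is what keeps the custom (un)marshallers from recursing: the alias shares the fields of `InspectContainerConfig` but none of its methods, and the v4 wrapper struct shadows `Entrypoint`/`StopSignal` with the legacy types. Below is a self-contained sketch of the same pattern with simplified, assumed types rather than the real inspect structures.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type Config struct {
	Entrypoint []string `json:"Entrypoint"`
	V4Compat   bool     `json:"-"`
}

func (c *Config) MarshalJSON() ([]byte, error) {
	type Alias Config // the alias drops MarshalJSON, breaking the recursion
	if !c.V4Compat {
		return json.Marshal((*Alias)(c))
	}
	// Legacy format: Entrypoint as a single space-joined string. The outer
	// field shadows the promoted field of the embedded alias.
	return json.Marshal(&struct {
		Entrypoint string `json:"Entrypoint"`
		*Alias
	}{
		Entrypoint: strings.Join(c.Entrypoint, " "),
		Alias:      (*Alias)(c),
	})
}

func main() {
	c := &Config{Entrypoint: []string{"/bin/sh", "-c"}}
	v5, _ := json.Marshal(c)
	c.V4Compat = true
	v4, _ := json.Marshal(c)
	fmt.Println(string(v5)) // {"Entrypoint":["/bin/sh","-c"]}
	fmt.Println(string(v4)) // {"Entrypoint":"/bin/sh -c"}
}
```

The shadowing works because encoding/json prefers the shallower field when names collide, which is what the `v4InspectContainerConfig` wrapper above relies on.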
VolumeDriver string `json:"VolumeDriver"` diff --git a/libpod/define/exec_codes.go b/libpod/define/exec_codes.go index a84730e722..46a8e9d86a 100644 --- a/libpod/define/exec_codes.go +++ b/libpod/define/exec_codes.go @@ -29,6 +29,10 @@ func TranslateExecErrorToExitCode(originalEC int, err error) int { if errors.Is(err, ErrOCIRuntimeNotFound) { return ExecErrorCodeNotFound } + if errors.Is(err, ErrInvalidArg) { + return ExecErrorCodeGeneric + } + return originalEC } diff --git a/libpod/define/info.go b/libpod/define/info.go index 564aad4b9a..bc37d81396 100644 --- a/libpod/define/info.go +++ b/libpod/define/info.go @@ -51,8 +51,10 @@ type HostInfo struct { OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` OS string `json:"os"` // RemoteSocket returns the UNIX domain socket the Podman service is listening on - RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` - RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` + RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` + // RootlessNetworkCmd returns the default rootless network command (slirp4netns or pasta) + RootlessNetworkCmd string `json:"rootlessNetworkCmd"` + RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` // ServiceIsRemote is true when the podman/libpod service is remote to the client ServiceIsRemote bool `json:"serviceIsRemote"` Security SecurityInfo `json:"security"` diff --git a/libpod/events.go b/libpod/events.go index 6e862f3676..92af63632c 100644 --- a/libpod/events.go +++ b/libpod/events.go @@ -28,22 +28,29 @@ func (r *Runtime) newEventer() (events.Eventer, error) { // newContainerEvent creates a new event based on a container func (c *Container) newContainerEvent(status events.Status) { - if err := c.newContainerEventWithInspectData(status, false); err != nil { + if err := c.newContainerEventWithInspectData(status, "", false); err != nil { + logrus.Errorf("Unable to write container event: %v", err) + } +} + +// newContainerHealthCheckEvent creates a new healthcheck event with the given status +func (c *Container) newContainerHealthCheckEvent(healthStatus string) { + if err := c.newContainerEventWithInspectData(events.HealthStatus, healthStatus, false); err != nil { logrus.Errorf("Unable to write container event: %v", err) } } // newContainerEventWithInspectData creates a new event and sets the // ContainerInspectData field if inspectData is set. 
-func (c *Container) newContainerEventWithInspectData(status events.Status, inspectData bool) error { +func (c *Container) newContainerEventWithInspectData(status events.Status, healthStatus string, inspectData bool) error { e := events.NewEvent(status) e.ID = c.ID() e.Name = c.Name() e.Image = c.config.RootfsImageName e.Type = events.Container + e.HealthStatus = healthStatus e.Details = events.Details{ - ID: e.ID, PodID: c.PodID(), Attributes: c.Labels(), } @@ -66,16 +73,6 @@ func (c *Container) newContainerEventWithInspectData(status events.Status, inspe } } - // if the current event is a HealthStatus event, we need to get the current - // status of the container to pass to the event - if status == events.HealthStatus { - containerHealthStatus, err := c.healthCheckStatus() - if err != nil { - e.HealthStatus = err.Error() - } - e.HealthStatus = containerHealthStatus - } - if status == events.Remove { exitCode, err := c.runtime.state.GetContainerExitCode(c.ID()) if err == nil { @@ -99,7 +96,6 @@ func (c *Container) newContainerExitedEvent(exitCode int32) { e.ContainerExitCode = &intExitCode e.Details = events.Details{ - ID: e.ID, Attributes: c.Labels(), } @@ -121,7 +117,6 @@ func (c *Container) newExecDiedEvent(sessionID string, exitCode int) { e.Attributes["execID"] = sessionID e.Details = events.Details{ - ID: e.ID, Attributes: c.Labels(), } @@ -130,7 +125,7 @@ func (c *Container) newExecDiedEvent(sessionID string, exitCode int) { } } -// netNetworkEvent creates a new event based on a network connect/disconnect +// newNetworkEvent creates a new event based on a network connect/disconnect func (c *Container) newNetworkEvent(status events.Status, netName string) { e := events.NewEvent(status) e.ID = c.ID() diff --git a/libpod/events/config.go b/libpod/events/config.go index 7b31842097..d0ab5d45f0 100644 --- a/libpod/events/config.go +++ b/libpod/events/config.go @@ -41,6 +41,8 @@ type Event struct { Type Type // Health status of the current container HealthStatus string `json:"health_status,omitempty"` + // Error code for certain events involving errors. + Error string `json:"error,omitempty"` Details } @@ -48,8 +50,6 @@ type Event struct { // Details describes specifics about certain events, specifically around // container events type Details struct { - // ID is the event ID - ID string // ContainerInspectData includes the payload of the container's inspect // data. Only set when events_container_create_inspect_data is set true // in containers.conf. @@ -172,6 +172,8 @@ const ( Prune Status = "prune" // Pull ... Pull Status = "pull" + // PullError is an error pulling an image + PullError Status = "pull-error" // Push ... Push Status = "push" // Refresh indicates that the system refreshed the state after a @@ -206,6 +208,8 @@ const ( Unpause Status = "unpause" // Untag ... Untag Status = "untag" + // Update indicates that a container's configuration has been modified. 
+ Update Status = "update" ) // EventFilter for filtering events diff --git a/libpod/events/events.go b/libpod/events/events.go index 18f5314691..5eda0033cc 100644 --- a/libpod/events/events.go +++ b/libpod/events/events.go @@ -90,6 +90,9 @@ func (e *Event) ToHumanReadable(truncate bool) string { humanFormat = fmt.Sprintf("%s %s %s %s (container=%s, name=%s)", e.Time, e.Type, e.Status, id, id, e.Network) case Image: humanFormat = fmt.Sprintf("%s %s %s %s %s", e.Time, e.Type, e.Status, id, e.Name) + if e.Error != "" { + humanFormat += " " + e.Error + } case System: if e.Name != "" { humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name) @@ -102,7 +105,7 @@ func (e *Event) ToHumanReadable(truncate bool) string { return humanFormat } -// NewEventFromString takes stringified json and converts +// newEventFromJSONString takes stringified json and converts // it to an event func newEventFromJSONString(event string) (*Event, error) { e := new(Event) @@ -194,6 +197,8 @@ func StringToStatus(name string) (Status, error) { return Prune, nil case Pull.String(): return Pull, nil + case PullError.String(): + return PullError, nil case Push.String(): return Push, nil case Refresh.String(): @@ -226,6 +231,8 @@ func StringToStatus(name string) (Status, error) { return Unpause, nil case Untag.String(): return Untag, nil + case Update.String(): + return Update, nil } return "", fmt.Errorf("unknown event status %q", name) } diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 273c5307dc..2ee94090f8 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -43,6 +43,9 @@ func (e EventJournalD) Write(ee Event) error { case Image: m["PODMAN_NAME"] = ee.Name m["PODMAN_ID"] = ee.ID + if ee.Error != "" { + m["ERROR"] = ee.Error + } case Container, Pod: m["PODMAN_IMAGE"] = ee.Image m["PODMAN_NAME"] = ee.Name @@ -228,6 +231,9 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"] case Image: newEvent.ID = entry.Fields["PODMAN_ID"] + if val, ok := entry.Fields["ERROR"]; ok { + newEvent.Error = val + } } return &newEvent, nil } diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 47ddc04948..3b027e63ec 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -7,13 +7,13 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "path/filepath" "strings" "time" "github.com/containers/podman/v5/libpod/define" - "github.com/containers/podman/v5/libpod/events" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -176,7 +176,9 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. if hcResult == define.HealthCheckNotDefined || hcResult == define.HealthCheckInternalError { return hcResult, logStatus, hcErr } - c.newContainerEvent(events.HealthStatus) + if c.runtime.config.Engine.HealthcheckEvents { + c.newContainerHealthCheckEvent(logStatus) + } return hcResult, logStatus, hcErr } @@ -276,6 +278,7 @@ func (c *Container) incrementStartupHCSuccessCounter(ctx context.Context) { if recreateTimer { logrus.Infof("Startup healthcheck for container %s passed, recreating timer", c.ID()) + oldUnit := c.state.HCUnitName // Create the new, standard healthcheck timer first. 
 		if err := c.createTimer(c.HealthCheckConfig().Interval.String(), false); err != nil {
 			logrus.Errorf("Error recreating container %s healthcheck: %v", c.ID(), err)
@@ -289,7 +292,7 @@ func (c *Container) incrementStartupHCSuccessCounter(ctx context.Context) {
 		// Which happens to be us.
 		// So this has to be last - after this, systemd serves us a
 		// SIGTERM and we exit.
-		if err := c.removeTransientFiles(ctx, true); err != nil {
+		if err := c.removeTransientFiles(ctx, true, oldUnit); err != nil {
 			logrus.Errorf("Error removing container %s healthcheck: %v", c.ID(), err)
 			return
 		}
@@ -346,7 +349,7 @@ func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.He
 	}
 }
-// updatedHealthCheckStatus updates the health status of the container
+// updateHealthStatus updates the health status of the container
 // in the healthcheck log
 func (c *Container) updateHealthStatus(status string) error {
 	healthCheck, err := c.getHealthCheckLog()
@@ -428,11 +431,12 @@ func (c *Container) healthCheckLogPath() string {
 // The caller should lock the container before this function is called.
 func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) {
 	var healthCheck define.HealthCheckResults
-	if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
-		return healthCheck, nil
-	}
 	b, err := os.ReadFile(c.healthCheckLogPath())
 	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// If the file does not exist, just return an empty healthcheck and no error.
+			return healthCheck, nil
+		}
 		return healthCheck, fmt.Errorf("failed to read health check log file: %w", err)
 	}
 	if err := json.Unmarshal(b, &healthCheck); err != nil {
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index b7b66a9392..344c4a2029 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -5,6 +5,7 @@ package libpod
 import (
 	"context"
 	"fmt"
+	"math/rand"
 	"os"
 	"os/exec"
 	"strings"
@@ -21,6 +22,9 @@ func (c *Container) createTimer(interval string, isStartup bool) error {
 	if c.disableHealthCheckSystemd(isStartup) {
 		return nil
 	}
+
+	hcUnitName := c.hcUnitName(isStartup, false)
+
 	podman, err := os.Executable()
 	if err != nil {
 		return fmt.Errorf("failed to get path for podman for a health check timer: %w", err)
 	}
@@ -35,7 +39,7 @@ func (c *Container) createTimer(interval string, isStartup bool) error {
 		cmd = append(cmd, "--setenv=PATH="+path)
 	}
-	cmd = append(cmd, "--unit", c.hcUnitName(isStartup), fmt.Sprintf("--on-unit-inactive=%s", interval), "--timer-property=AccuracySec=1s", podman)
+	cmd = append(cmd, "--unit", hcUnitName, fmt.Sprintf("--on-unit-inactive=%s", interval), "--timer-property=AccuracySec=1s", podman)
 	if logrus.IsLevelEnabled(logrus.DebugLevel) {
 		cmd = append(cmd, "--log-level=debug", "--syslog")
@@ -53,6 +57,12 @@ func (c *Container) createTimer(interval string, isStartup bool) error {
 	if output, err := systemdRun.CombinedOutput(); err != nil {
 		return fmt.Errorf("%s", output)
 	}
+
+	c.state.HCUnitName = hcUnitName
+	if err := c.save(); err != nil {
+		return fmt.Errorf("saving container %s healthcheck unit name: %w", c.ID(), err)
+	}
+
 	return nil
 }
@@ -72,13 +82,19 @@ func (c *Container) startTimer(isStartup bool) error {
 	if c.disableHealthCheckSystemd(isStartup) {
 		return nil
 	}
+
+	hcUnitName := c.state.HCUnitName
+	if hcUnitName == "" {
+		hcUnitName = c.hcUnitName(isStartup, true)
+	}
+
 	conn, err := systemd.ConnectToDBUS()
 	if err != nil {
 		return fmt.Errorf("unable to get systemd connection to start healthchecks: %w", err)
 	}
 	defer conn.Close()
-	startFile :=
fmt.Sprintf("%s.service", c.hcUnitName(isStartup)) + startFile := fmt.Sprintf("%s.service", hcUnitName) startChan := make(chan string) if _, err := conn.RestartUnitContext(context.Background(), startFile, "fail", startChan); err != nil { return err @@ -92,7 +108,7 @@ func (c *Container) startTimer(isStartup bool) error { // removeTransientFiles removes the systemd timer and unit files // for the container -func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) error { +func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool, unitName string) error { if c.disableHealthCheckSystemd(isStartup) { return nil } @@ -106,10 +122,13 @@ func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) er // clean up as much as possible. stopErrors := []error{} + if unitName == "" { + unitName = c.hcUnitName(isStartup, true) + } // Stop the timer before the service to make sure the timer does not // fire after the service is stopped. timerChan := make(chan string) - timerFile := fmt.Sprintf("%s.timer", c.hcUnitName(isStartup)) + timerFile := fmt.Sprintf("%s.timer", unitName) if _, err := conn.StopUnitContext(ctx, timerFile, "ignore-dependencies", timerChan); err != nil { if !strings.HasSuffix(err.Error(), ".timer not loaded.") { stopErrors = append(stopErrors, fmt.Errorf("removing health-check timer %q: %w", timerFile, err)) @@ -121,7 +140,7 @@ func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) er // Reset the service before stopping it to make sure it's being removed // on stop. serviceChan := make(chan string) - serviceFile := fmt.Sprintf("%s.service", c.hcUnitName(isStartup)) + serviceFile := fmt.Sprintf("%s.service", unitName) if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil { logrus.Debugf("Failed to reset unit file: %q", err) } @@ -151,11 +170,19 @@ func (c *Container) disableHealthCheckSystemd(isStartup bool) bool { return false } -// Systemd unit name for the healthcheck systemd unit -func (c *Container) hcUnitName(isStartup bool) string { +// Systemd unit name for the healthcheck systemd unit. +// Bare indicates that a random suffix should not be applied to the name. This +// was default behavior previously, and is used for backwards compatibility. +func (c *Container) hcUnitName(isStartup, bare bool) string { unitName := c.ID() if isStartup { unitName += "-startup" } + if !bare { + // Ensure that unit names are unique from run to run by appending + // a random suffix. 
+ // Ref: RH Jira RHEL-26105 + unitName += fmt.Sprintf("-%x", rand.Int()) + } return unitName } diff --git a/libpod/healthcheck_nosystemd_linux.go b/libpod/healthcheck_nosystemd_linux.go index cd8503f82a..c338caf1cd 100644 --- a/libpod/healthcheck_nosystemd_linux.go +++ b/libpod/healthcheck_nosystemd_linux.go @@ -18,6 +18,6 @@ func (c *Container) startTimer(isStartup bool) error { // removeTransientFiles removes the systemd timer and unit files // for the container -func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) error { +func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool, unitName string) error { return nil } diff --git a/libpod/healthcheck_unsupported.go b/libpod/healthcheck_unsupported.go index 0517465dbc..8d733698b8 100644 --- a/libpod/healthcheck_unsupported.go +++ b/libpod/healthcheck_unsupported.go @@ -18,6 +18,6 @@ func (c *Container) startTimer(isStartup bool) error { // removeTransientFiles removes the systemd timer and unit files // for the container -func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) error { +func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool, unitName string) error { return nil } diff --git a/libpod/info.go b/libpod/info.go index 140b535ef7..24e2fd910e 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -126,6 +126,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) { NetworkBackend: r.config.Network.NetworkBackend, NetworkBackendInfo: r.network.NetworkInfo(), OS: runtime.GOOS, + RootlessNetworkCmd: r.config.Network.DefaultRootlessNetworkCmd, SwapFree: mi.SwapFree, SwapTotal: mi.SwapTotal, } diff --git a/libpod/info_linux.go b/libpod/info_linux.go index ee34c8e200..1e6bed8c89 100644 --- a/libpod/info_linux.go +++ b/libpod/info_linux.go @@ -18,6 +18,8 @@ import ( "github.com/containers/common/pkg/version" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" ) @@ -91,17 +93,13 @@ func (r *Runtime) setPlatformHostInfo(info *define.HostInfo) error { } if rootless.IsRootless() { - uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map") + uidmappings, gidmappings, err := unshare.GetHostIDMappings("") if err != nil { - return fmt.Errorf("reading uid mappings: %w", err) - } - gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map") - if err != nil { - return fmt.Errorf("reading gid mappings: %w", err) + return fmt.Errorf("reading id mappings: %w", err) } idmappings := define.IDMappings{ - GIDMap: gidmappings, - UIDMap: uidmappings, + GIDMap: util.RuntimeSpecToIDtools(gidmappings), + UIDMap: util.RuntimeSpecToIDtools(uidmappings), } info.IDMappings = idmappings } diff --git a/libpod/kube.go b/libpod/kube.go index 5a45087e94..b376f9ef8f 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -9,16 +9,15 @@ import ( "math/rand" "os" "reflect" + "slices" "sort" "strconv" "strings" "time" - "unicode/utf8" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" - "github.com/containers/podman/v5/pkg/annotations" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/env" v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" @@ -31,19 +30,18 @@ import ( "github.com/containers/podman/v5/pkg/util" 
"github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description that includes just a single container. -func GenerateForKube(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { +func GenerateForKube(ctx context.Context, ctrs []*Container, getService, podmanOnly bool) (*v1.Pod, error) { // Generate the v1.Pod yaml description - return simplePodWithV1Containers(ctx, ctrs, getService, useLongAnnotations, podmanOnly) + return simplePodWithV1Containers(ctx, ctrs, getService, podmanOnly) } // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description -func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, []v1.ServicePort, error) { +func (p *Pod) GenerateForKube(ctx context.Context, getService, podmanOnly bool) (*v1.Pod, []v1.ServicePort, error) { // Generate the v1.Pod yaml description var ( ports []v1.ContainerPort @@ -95,7 +93,7 @@ func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotation hostUsers = infraContainer.IDMappings().HostUIDMapping && infraContainer.IDMappings().HostGIDMapping infraName = infraContainer.config.Name } - pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly, infraName) + pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService, podmanOnly, infraName) if err != nil { return nil, servicePorts, err } @@ -451,16 +449,6 @@ func newServicePortState() servicePortState { } } -func truncateKubeAnnotation(str string, useLongAnnotations bool) string { - str = strings.TrimSpace(str) - if useLongAnnotations || utf8.RuneCountInString(str) < define.MaxKubeAnnotation { - return str - } - trunc := string([]rune(str)[:define.MaxKubeAnnotation]) - logrus.Warnf("Truncation Annotation: %q to %q: Kubernetes only allows %d characters", str, trunc, define.MaxKubeAnnotation) - return trunc -} - // containerPortsToServicePorts takes a slice of containerports and generates a // slice of service ports func (state *servicePortState) containerPortsToServicePorts(containerPorts []v1.ContainerPort) ([]v1.ServicePort, error) { @@ -507,7 +495,7 @@ func containersToServicePorts(containers []v1.Container) ([]v1.ServicePort, erro return sps, nil } -func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly bool, infraName string) (*v1.Pod, error) { +func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService, podmanOnly bool, infraName string) (*v1.Pod, error) { deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) @@ -526,14 +514,14 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po for _, ctr := range containers { if !ctr.IsInfra() { for k, v := range ctr.config.Spec.Annotations { - if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { + if !podmanOnly && (define.IsReservedAnnotation(k)) { continue } - podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) + podAnnotations[fmt.Sprintf("%s/%s", k, 
removeUnderscores(ctr.Name()))] = v } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { - podAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + podAnnotations[k] = v } isInit := ctr.IsInitCtr() // Since hostname is only set at pod level, set the hostname to the hostname of the first container we encounter @@ -556,7 +544,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po return nil, err } for k, v := range annotations { - podAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) + podAnnotations[define.BindMountPrefix] = k + ":" + v } // Since port bindings for the pod are handled by the // infra container, wipe them here only if we are sharing the net namespace @@ -605,7 +593,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po // If the infraName is not the podID-infra, that means the user set another infra name using // --infra-name during pod creation if infraName != "" && infraName != p.ID()[:12]+"-infra" { - podAnnotations[define.InfraNameAnnotation] = truncateKubeAnnotation(infraName, useLongAnnotations) + podAnnotations[define.InfraNameAnnotation] = infraName } } } @@ -674,7 +662,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta // simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated // for a single container. we "insert" that container description in a pod. -func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { +func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, podmanOnly bool) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeInitCtrs := []v1.Container{} kubeVolumes := make([]v1.Volume, 0) @@ -691,15 +679,15 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic for _, ctr := range ctrs { ctrNames = append(ctrNames, removeUnderscores(ctr.Name())) for k, v := range ctr.config.Spec.Annotations { - if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { + if !podmanOnly && define.IsReservedAnnotation(k) { continue } - kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) + kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = v } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { - kubeAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + kubeAnnotations[k] = v } isInit := ctr.IsInitCtr() @@ -752,7 +740,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic return nil, err } for k, v := range annotations { - kubeAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) + kubeAnnotations[define.BindMountPrefix] = k + ":" + v } if isInit { kubeInitCtrs = append(kubeInitCtrs, kubeCtr) @@ -1320,7 +1308,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, bool, error scHasData = true sc.ReadOnlyRootFilesystem = &ro } - if c.config.Spec.Linux.MaskedPaths == nil { + if c.config.Spec.Linux != nil 
&& c.config.Spec.Linux.MaskedPaths == nil { scHasData = true unmask := v1.UnmaskedProcMount sc.ProcMount = &unmask @@ -1384,7 +1372,7 @@ func removeUnderscores(s string) string { // getAutoUpdateAnnotations searches for auto-update container labels // and returns them as kube annotations -func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string, useLongAnnotations bool) map[string]string { +func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[string]string { autoUpdateLabel := "io.containers.autoupdate" annotations := make(map[string]string) @@ -1394,7 +1382,7 @@ func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string, useLo // since labels can variate between containers within a pod, they will be // identified with the container name when converted into kube annotations kc := fmt.Sprintf("%s/%s", k, ctrName) - annotations[kc] = truncateKubeAnnotation(v, useLongAnnotations) + annotations[kc] = v } } diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go index e26ca24af9..6e7dda5459 100644 --- a/libpod/lock/file/file_lock.go +++ b/libpod/lock/file/file_lock.go @@ -7,6 +7,7 @@ import ( "strconv" "syscall" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" ) @@ -20,7 +21,7 @@ type FileLocks struct { //nolint:revive // struct name stutters // CreateFileLock sets up a directory containing the various lock files. func CreateFileLock(path string) (*FileLocks, error) { - _, err := os.Stat(path) + err := fileutils.Exists(path) if err == nil { return nil, fmt.Errorf("directory %s exists: %w", path, syscall.EEXIST) } @@ -37,7 +38,7 @@ func CreateFileLock(path string) (*FileLocks, error) { // OpenFileLock opens an existing directory with the lock files. func OpenFileLock(path string) (*FileLocks, error) { - _, err := os.Stat(path) + err := fileutils.Exists(path) if err != nil { return nil, err } diff --git a/libpod/networking_common.go b/libpod/networking_common.go index 5756b71f13..6bd32a3d95 100644 --- a/libpod/networking_common.go +++ b/libpod/networking_common.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "regexp" + "slices" "sort" "github.com/containers/common/libnetwork/etchosts" @@ -18,7 +19,6 @@ import ( "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // convertPortMappings will remove the HostIP part from the ports when running inside podman machine. 
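Several hunks in this patch (file_lock.go just above, plus options.go, plugin/volume_api.go, and runtime.go further down) replace `os.Stat` existence checks with `fileutils.Exists` from containers/storage, which only reports whether the path is accessible instead of returning an unused `FileInfo`. A small usage sketch follows; the import path is the one used in the diff, and the checked path is just an example.

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// fileutils.Exists returns nil when the path exists and is accessible,
	// otherwise the underlying error, so callers only handle the error value.
	if err := fileutils.Exists("/etc/containers/registries.conf"); err != nil {
		fmt.Println("missing or inaccessible:", err)
		return
	}
	fmt.Println("found")
}
```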
diff --git a/libpod/networking_freebsd.go b/libpod/networking_freebsd.go index 2d5c5e4885..4dc0ff25e0 100644 --- a/libpod/networking_freebsd.go +++ b/libpod/networking_freebsd.go @@ -268,7 +268,3 @@ func (c *Container) reloadRootlessRLKPortMapping() error { func (c *Container) setupRootlessNetwork() error { return nil } - -func getPastaIP(state *ContainerState) (net.IP, error) { - return nil, fmt.Errorf("pasta networking is Linux only") -} diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index 10e0ec53e4..3ffc9d7159 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -11,7 +11,6 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/common/libnetwork/types" - netUtil "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/netns" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/rootless" @@ -191,7 +190,7 @@ func getContainerNetNS(ctr *Container) (string, *Container, error) { func getContainerNetIO(ctr *Container) (map[string]define.ContainerNetworkStats, error) { perNetworkStats := make(map[string]define.ContainerNetworkStats) - netNSPath, otherCtr, netPathErr := getContainerNetNS(ctr) + netNSPath, _, netPathErr := getContainerNetNS(ctr) if netPathErr != nil { return nil, netPathErr } @@ -201,42 +200,19 @@ func getContainerNetIO(ctr *Container) (map[string]define.ContainerNetworkStats, return nil, nil } - netMode := ctr.config.NetMode - netStatus := ctr.getNetworkStatus() - if otherCtr != nil { - netMode = otherCtr.config.NetMode - netStatus = otherCtr.getNetworkStatus() - } - if netMode.IsSlirp4netns() { - // create a fake status with correct interface name for the logic below - netStatus = map[string]types.StatusBlock{ - "slirp4netns": { - Interfaces: map[string]types.NetInterface{"tap0": {}}, - }, - } - } err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error { - for _, status := range netStatus { - for dev := range status.Interfaces { - link, err := netlink.LinkByName(dev) - if err != nil { - return err - } - stats := link.Attrs().Statistics - if stats != nil { - newStats := define.ContainerNetworkStats{ - RxBytes: stats.RxBytes, - RxDropped: stats.RxDropped, - RxErrors: stats.RxErrors, - RxPackets: stats.RxPackets, - TxBytes: stats.TxBytes, - TxDropped: stats.TxDropped, - TxErrors: stats.TxErrors, - TxPackets: stats.TxPackets, - } + links, err := netlink.LinkList() + if err != nil { + return fmt.Errorf("retrieving all network interfaces: %w", err) + } + for _, link := range links { + attributes := link.Attrs() + if attributes.Flags&net.FlagLoopback != 0 { + continue + } - perNetworkStats[dev] = newStats - } + if attributes.Statistics != nil { + perNetworkStats[attributes.Name] = getNetStatsFromNetlinkStats(attributes.Statistics) } } return nil @@ -244,6 +220,19 @@ func getContainerNetIO(ctr *Container) (map[string]define.ContainerNetworkStats, return perNetworkStats, err } +func getNetStatsFromNetlinkStats(stats *netlink.LinkStatistics) define.ContainerNetworkStats { + return define.ContainerNetworkStats{ + RxBytes: stats.RxBytes, + RxDropped: stats.RxDropped, + RxErrors: stats.RxErrors, + RxPackets: stats.RxPackets, + TxBytes: stats.TxBytes, + TxDropped: stats.TxDropped, + TxErrors: stats.TxErrors, + TxPackets: stats.TxPackets, + } +} + // joinedNetworkNSPath returns netns path and bool if netns was set func (c *Container) joinedNetworkNSPath() (string, bool) { for _, namespace := range c.config.Spec.Linux.Namespaces { @@ -310,13 +299,3 @@ func (c 
*Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBloc }) return result, err } - -func getPastaIP(state *ContainerState) (net.IP, error) { - var ip string - err := ns.WithNetNSPath(state.NetNS, func(_ ns.NetNS) error { - // get the first ip in the netns - ip = netUtil.GetLocalIP() - return nil - }) - return net.ParseIP(ip), err -} diff --git a/libpod/networking_pasta_linux.go b/libpod/networking_pasta_linux.go index 97e8118e7c..7934bc5917 100644 --- a/libpod/networking_pasta_linux.go +++ b/libpod/networking_pasta_linux.go @@ -12,10 +12,15 @@ package libpod import "github.com/containers/common/libnetwork/pasta" func (r *Runtime) setupPasta(ctr *Container, netns string) error { - return pasta.Setup(&pasta.SetupOptions{ + res, err := pasta.Setup2(&pasta.SetupOptions{ Config: r.config, Netns: netns, Ports: ctr.convertPortMappings(), ExtraOptions: ctr.config.NetworkOptions[pasta.BinaryName], }) + if err != nil { + return err + } + ctr.pastaResult = res + return nil } diff --git a/libpod/oci_conmon_attach_common.go b/libpod/oci_conmon_attach_common.go index a1d102fc18..c69cbfbf55 100644 --- a/libpod/oci_conmon_attach_common.go +++ b/libpod/oci_conmon_attach_common.go @@ -3,7 +3,6 @@ package libpod import ( - "context" "errors" "fmt" "io" @@ -32,8 +31,9 @@ const ( // Attach to the given container. // Does not check if state is appropriate. // started is only required if startContainer is true. +// It does not wait for the container to be healthy, it is the caller responsibility to do so. func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error { - passthrough := c.LogDriver() == define.PassthroughLogging + passthrough := c.LogDriver() == define.PassthroughLogging || c.LogDriver() == define.PassthroughTTYLogging if params == nil || params.Streams == nil { return fmt.Errorf("must provide parameters to Attach: %w", define.ErrInternal) @@ -86,7 +86,7 @@ func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error { // If starting was requested, start the container and notify when that's // done. if params.Start { - if err := c.start(context.TODO()); err != nil { + if err := c.start(); err != nil { return err } params.Started <- true diff --git a/libpod/oci_conmon_common.go b/libpod/oci_conmon_common.go index 73179378f6..c32fba46e2 100644 --- a/libpod/oci_conmon_common.go +++ b/libpod/oci_conmon_common.go @@ -183,23 +183,11 @@ func hasCurrentUserMapped(ctr *Container) bool { // CreateContainer creates a container. func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { - // always make the run dir accessible to the current user so that the PID files can be read without - // being in the rootless user namespace. - if err := makeAccessible(ctr.state.RunDir, 0, 0); err != nil { - return 0, err - } if !hasCurrentUserMapped(ctr) { - for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} { - if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil { - return 0, err - } - } - // if we are running a non privileged container, be sure to umount some kernel paths so they are not // bind mounted inside the container at all. 
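The network-stats rework above stops resolving interface names from the per-network status and instead, inside the joined network namespace, walks every link and records its netlink counters, skipping loopback. Below is a rough, Linux-only, stand-alone sketch of that loop, without the netns join and error handling of the real code; names mirror the diff.

```go
package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	links, err := netlink.LinkList()
	if err != nil {
		panic(err)
	}
	for _, link := range links {
		attrs := link.Attrs()
		if attrs.Flags&net.FlagLoopback != 0 {
			continue // loopback traffic is not attributed to the container network
		}
		if attrs.Statistics != nil {
			fmt.Printf("%s: rx=%d bytes tx=%d bytes\n",
				attrs.Name, attrs.Statistics.RxBytes, attrs.Statistics.TxBytes)
		}
	}
}
```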
- if !ctr.config.Privileged && !rootless.IsRootless() { - return r.createRootlessContainer(ctr, restoreOptions) - } + hideFiles := !ctr.config.Privileged && !rootless.IsRootless() + return r.createRootlessContainer(ctr, restoreOptions, hideFiles) } return r.createOCIContainer(ctr, restoreOptions) } @@ -232,29 +220,31 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { return fmt.Errorf("getting stderr pipe: %w", err) } - if err := cmd.Start(); err != nil { - out, err2 := io.ReadAll(errPipe) - if err2 != nil { - return fmt.Errorf("getting container %s state: %w", ctr.ID(), err) - } - if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") { - if err := ctr.removeConmonFiles(); err != nil { - logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) - } - ctr.state.ExitCode = -1 - ctr.state.FinishedTime = time.Now() - ctr.state.State = define.ContainerStateExited - return ctr.runtime.state.AddContainerExitCode(ctr.ID(), ctr.state.ExitCode) - } - return fmt.Errorf("getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err) + err = cmd.Start() + if err != nil { + return fmt.Errorf("error launching container runtime: %w", err) } defer func() { _ = cmd.Wait() }() + stderr, err := io.ReadAll(errPipe) + if err != nil { + return fmt.Errorf("reading stderr: %s: %w", ctr.ID(), err) + } + if strings.Contains(string(stderr), "does not exist") || strings.Contains(string(stderr), "No such file") { + if err := ctr.removeConmonFiles(); err != nil { + logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) + } + ctr.state.ExitCode = -1 + ctr.state.FinishedTime = time.Now() + ctr.state.State = define.ContainerStateExited + return ctr.runtime.state.AddContainerExitCode(ctr.ID(), ctr.state.ExitCode) + } if err := errPipe.Close(); err != nil { return err } + out, err := io.ReadAll(outPipe) if err != nil { return fmt.Errorf("reading stdout: %s: %w", ctr.ID(), err) @@ -340,6 +330,7 @@ func generateResourceFile(res *spec.LinuxResources) (string, []string, error) { if err != nil { return "", nil, err } + defer f.Close() j, err := json.Marshal(res) if err != nil { @@ -976,28 +967,6 @@ func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntime return &conmon, &ocirt, nil } -// makeAccessible changes the path permission and each parent directory to have --x--x--x -func makeAccessible(path string, uid, gid int) error { - for ; path != "/"; path = filepath.Dir(path) { - st, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid { - continue - } - if st.Mode()&0111 != 0111 { - if err := os.Chmod(path, st.Mode()|0111); err != nil { - return err - } - } - } - return nil -} - // Wait for a container which has been sent a signal to stop func waitContainerStop(ctr *Container, timeout time.Duration) error { return waitPidStop(ctr.state.PID, timeout) @@ -1417,7 +1386,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p logDriverArg = define.JournaldLogging case define.NoLogging: logDriverArg = define.NoLogging - case define.PassthroughLogging: + case define.PassthroughLogging, define.PassthroughTTYLogging: logDriverArg = define.PassthroughLogging //lint:ignore ST1015 the default case has to be here default: //nolint:gocritic diff --git a/libpod/oci_conmon_freebsd.go b/libpod/oci_conmon_freebsd.go index 5f113f5cba..f681f785a1 
100644 --- a/libpod/oci_conmon_freebsd.go +++ b/libpod/oci_conmon_freebsd.go @@ -8,7 +8,7 @@ import ( "os/exec" ) -func (r *ConmonOCIRuntime) createRootlessContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { +func (r *ConmonOCIRuntime) createRootlessContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions, hideFiles bool) (int64, error) { return -1, errors.New("unsupported (*ConmonOCIRuntime) createRootlessContainer") } diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index e624894000..05dc65d360 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -3,7 +3,9 @@ package libpod import ( + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -func (r *ConmonOCIRuntime) createRootlessContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { +func (r *ConmonOCIRuntime) createRootlessContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions, hideFiles bool) (int64, error) { type result struct { restoreDuration int64 err error @@ -40,35 +42,88 @@ func (r *ConmonOCIRuntime) createRootlessContainer(ctr *Container, restoreOption } defer errorhandling.CloseQuiet(fd) + rootPath, err := ctr.getRootPathForOCI() + if err != nil { + return 0, err + } + // create a new mountns on the current thread if err = unix.Unshare(unix.CLONE_NEWNS); err != nil { return 0, err } defer func() { - if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil { - logrus.Errorf("Unable to clone new namespace: %q", err) + err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS) + if err == nil { + // If we are able to reset the previous mount namespace, unlock the thread and reuse it + runtime.UnlockOSThread() + } else { + // otherwise, leave the thread locked and the Go runtime will terminate it + logrus.Errorf("Unable to reset the previous mount namespace: %q", err) } }() - - // don't spread our mounts around. We are setting only /sys to be slave - // so that the cleanup process is still able to umount the storage and the - // changes are propagated to the host. 
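The defer above only unlocks the OS thread when the original mount namespace could be restored; otherwise the thread stays locked so the Go runtime terminates it instead of reusing a thread stuck in the wrong namespace. A general sketch of that pattern follows, under assumed names (origNS is an open fd for the saved mount namespace; unsharing a mount namespace needs the usual privileges), not the podman code itself.

```go
package main

import (
	"runtime"

	"golang.org/x/sys/unix"
)

func inNewMountNS(origNS int, fn func() error) error {
	runtime.LockOSThread()
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		runtime.UnlockOSThread()
		return err
	}
	defer func() {
		if err := unix.Setns(origNS, unix.CLONE_NEWNS); err == nil {
			// Namespace restored: the thread is safe to hand back to the scheduler.
			runtime.UnlockOSThread()
		}
		// Otherwise leave the thread locked; a locked thread is terminated when
		// its goroutine exits, so it is never reused in the wrong mount namespace.
	}()
	return fn()
}

func main() {
	fd, err := unix.Open("/proc/self/ns/mnt", unix.O_RDONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	_ = inNewMountNS(fd, func() error { return nil })
}
```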
- err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "") - if err != nil { - return 0, fmt.Errorf("cannot make /sys slave: %w", err) - } - mounts, err := pmount.GetMounts() if err != nil { return 0, err } - for _, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") { - continue + if rootPath != "" { + byMountpoint := make(map[string]*pmount.Info) + for _, m := range mounts { + byMountpoint[m.Mountpoint] = m + } + isShared := false + var parentMount string + for dir := filepath.Dir(rootPath); ; dir = filepath.Dir(dir) { + if m, found := byMountpoint[dir]; found { + parentMount = dir + for _, o := range strings.Split(m.Optional, ",") { + opt := strings.Split(o, ":") + if opt[0] == "shared" { + isShared = true + break + } + } + break + } + if dir == "/" { + return 0, fmt.Errorf("cannot find mountpoint for the root path") + } + } + + // do not propagate the bind mount on the parent mount namespace + if err := unix.Mount("", parentMount, "", unix.MS_SLAVE, ""); err != nil { + return 0, fmt.Errorf("failed to make %s slave: %w", parentMount, err) + } + + // bind mount the containers' mount path to the path where the OCI runtime expects it to be + if err := unix.Mount(ctr.state.Mountpoint, rootPath, "", unix.MS_BIND, ""); err != nil { + return 0, fmt.Errorf("failed to bind mount %s to %s: %w", ctr.state.Mountpoint, rootPath, err) + } + + if isShared { + // we need to restore the shared propagation of the parent mount so that we don't break -v $SRC:$DST:shared in the container + // if $SRC is on the same mount as the root path + if err := unix.Mount("", parentMount, "", unix.MS_SHARED, ""); err != nil { + return 0, fmt.Errorf("failed to restore MS_SHARED propagation for %s: %w", parentMount, err) + } + } + } + + if hideFiles { + // don't spread our mounts around. We are setting only /sys to be slave + // so that the cleanup process is still able to umount the storage and the + // changes are propagated to the host. 
+ err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "") + if err != nil { + return 0, fmt.Errorf("cannot make /sys slave: %w", err) } - err = unix.Unmount(m.Mountpoint, 0) - if err != nil && !os.IsNotExist(err) { - return 0, fmt.Errorf("cannot unmount %s: %w", m.Mountpoint, err) + for _, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") { + continue + } + err = unix.Unmount(m.Mountpoint, 0) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return 0, fmt.Errorf("cannot unmount %s: %w", m.Mountpoint, err) + } } } return r.createOCIContainer(ctr, restoreOptions) diff --git a/libpod/options.go b/libpod/options.go index e8dd7b931c..9f30f2f325 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "net" - "os" "strings" "syscall" "time" @@ -23,6 +22,7 @@ import ( "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/regexp" "github.com/opencontainers/runtime-spec/specs-go" @@ -83,6 +83,13 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { copy(rt.storageConfig.GIDMap, config.GIDMap) } + if config.PullOptions != nil { + rt.storageConfig.PullOptions = make(map[string]string) + for k, v := range config.PullOptions { + rt.storageConfig.PullOptions[k] = v + } + } + // If any one of runroot, graphroot, graphdrivername, // or graphdriveroptions are set, then GraphRoot and RunRoot // must be set @@ -268,7 +275,7 @@ func WithStaticDir(dir string) RuntimeOption { func WithRegistriesConf(path string) RuntimeOption { logrus.Debugf("Setting custom registries.conf: %q", path) return func(rt *Runtime) error { - if _, err := os.Stat(path); err != nil { + if err := fileutils.Exists(path); err != nil { return fmt.Errorf("locating specified registries.conf: %w", err) } if rt.imageContext == nil { @@ -557,7 +564,7 @@ func WithShmDir(dir string) CtrCreateOption { } } -// WithNOShmMount tells libpod whether to mount /dev/shm +// WithNoShm tells libpod whether to mount /dev/shm func WithNoShm(mount bool) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { @@ -1101,7 +1108,7 @@ func WithLogDriver(driver string) CtrCreateOption { switch driver { case "": return fmt.Errorf("log driver must be set: %w", define.ErrInvalidArg) - case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging: + case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging, define.PassthroughTTYLogging: break default: return fmt.Errorf("invalid log driver: %w", define.ErrInvalidArg) @@ -1329,7 +1336,7 @@ func WithRootFS(rootfs string, overlay bool, mapping *string) CtrCreateOption { if ctr.valid { return define.ErrCtrFinalized } - if _, err := os.Stat(rootfs); err != nil { + if err := fileutils.Exists(rootfs); err != nil { return err } ctr.config.Rootfs = rootfs @@ -1392,13 +1399,12 @@ func WithRestartPolicy(policy string) CtrCreateOption { return define.ErrCtrFinalized } - switch policy { - case define.RestartPolicyNone, define.RestartPolicyNo, define.RestartPolicyOnFailure, define.RestartPolicyAlways, define.RestartPolicyUnlessStopped: - ctr.config.RestartPolicy = policy - default: - return fmt.Errorf("%q is not a valid restart policy: %w", policy, define.ErrInvalidArg) + if err := define.ValidateRestartPolicy(policy); 
err != nil { + return err } + ctr.config.RestartPolicy = policy + return nil } } @@ -1475,6 +1481,7 @@ func WithImageVolumes(volumes []*ContainerImageVolume) CtrCreateOption { Dest: vol.Dest, Source: vol.Source, ReadWrite: vol.ReadWrite, + SubPath: vol.SubPath, }) } @@ -1827,7 +1834,7 @@ func WithSecrets(containerSecrets []*ContainerSecret) CtrCreateOption { } } -// WithSecrets adds environment variable secrets to the container +// WithEnvSecrets adds environment variable secrets to the container func WithEnvSecrets(envSecrets map[string]string) CtrCreateOption { return func(ctr *Container) error { ctr.config.EnvSecrets = make(map[string]*secrets.Secret) @@ -2085,7 +2092,7 @@ func WithPodCgroupParent(path string) PodCreateOption { } } -// WithPodCgroups tells containers in this pod to use the cgroup created for +// WithPodParent tells containers in this pod to use the cgroup created for // this pod. // This can still be overridden at the container level by explicitly specifying // a Cgroup parent. diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go index bc9f675f4c..834d9c9800 100644 --- a/libpod/plugin/volume_api.go +++ b/libpod/plugin/volume_api.go @@ -16,6 +16,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" + "github.com/containers/storage/pkg/fileutils" "github.com/docker/go-plugins-helpers/sdk" "github.com/docker/go-plugins-helpers/volume" jsoniter "github.com/json-iterator/go" @@ -188,7 +189,7 @@ func (p *VolumePlugin) getURI() string { // Verify the plugin is still available. // Does not actually ping the API, just verifies that the socket still exists. func (p *VolumePlugin) verifyReachable() error { - if _, err := os.Stat(p.SocketPath); err != nil { + if err := fileutils.Exists(p.SocketPath); err != nil { if os.IsNotExist(err) { pluginsLock.Lock() defer pluginsLock.Unlock() diff --git a/libpod/reset.go b/libpod/reset.go index 187bcbf4ba..c223aeb5df 100644 --- a/libpod/reset.go +++ b/libpod/reset.go @@ -11,6 +11,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/types" + blobinfocache "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/util" @@ -108,13 +109,13 @@ func (r *Runtime) Reset(ctx context.Context) error { return define.ErrRuntimeStopped } - var timeout *uint + var timeout uint = 0 pods, err := r.GetAllPods() if err != nil { return err } for _, p := range pods { - if ctrs, err := r.RemovePod(ctx, p, true, true, timeout); err != nil { + if ctrs, err := r.RemovePod(ctx, p, true, true, &timeout); err != nil { if errors.Is(err, define.ErrNoSuchPod) { continue } @@ -133,7 +134,7 @@ func (r *Runtime) Reset(ctx context.Context) error { } for _, c := range ctrs { - if ctrs, _, err := r.RemoveContainerAndDependencies(ctx, c, true, true, timeout); err != nil { + if ctrs, _, err := r.RemoveContainerAndDependencies(ctx, c, true, true, &timeout); err != nil { for ctr, err := range ctrs { logrus.Errorf("Error removing container %s: %v", ctr, err) } @@ -163,7 +164,7 @@ func (r *Runtime) Reset(ctx context.Context) error { return err } for _, v := range volumes { - if err := r.RemoveVolume(ctx, v, true, timeout); err != nil { + if err := r.RemoveVolume(ctx, v, true, &timeout); err != nil { if errors.Is(err, define.ErrNoSuchVolume) { continue } @@ -257,6 +258,14 @@ func (r *Runtime) Reset(ctx context.Context) error { prevError = err } } + + 
if err := blobinfocache.CleanupDefaultCache(nil); err != nil { + if prevError != nil { + logrus.Error(prevError) + } + prevError = err + } + if storageConfPath, err := storage.DefaultConfigFile(); err == nil { switch storageConfPath { case stypes.SystemConfigFile: diff --git a/libpod/runtime.go b/libpod/runtime.go index 50b2c75eb9..7ad2c23bbf 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -15,8 +15,6 @@ import ( "syscall" "time" - "golang.org/x/sys/unix" - "github.com/containers/buildah/pkg/parse" "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/network" @@ -33,16 +31,20 @@ import ( "github.com/containers/podman/v5/libpod/lock" "github.com/containers/podman/v5/libpod/plugin" "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/systemd" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" "github.com/docker/docker/pkg/namesgenerator" + "github.com/hashicorp/go-multierror" jsoniter "github.com/json-iterator/go" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // Set up the JSON library for all of Libpod @@ -141,7 +143,7 @@ func SetXdgDirs() error { if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" { sessionAddr := filepath.Join(runtimeDir, "bus") - if _, err := os.Stat(sessionAddr); err == nil { + if err := fileutils.Exists(sessionAddr); err == nil { os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr)) } } @@ -309,7 +311,7 @@ func getDBState(runtime *Runtime) (State, error) { switch backend { case config.DBBackendDefault: // for backwards compatibility check if boltdb exists, if it does not we use sqlite - if _, err := os.Stat(boltDBPath); err != nil { + if err := fileutils.Exists(boltDBPath); err != nil { if errors.Is(err, fs.ErrNotExist) { // need to set DBBackend string so podman info will show the backend name correctly runtime.config.Engine.DBBackend = config.DBBackendSQLite.String() @@ -392,32 +394,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { runtime.mergeDBConfig(dbConfig) - unified, _ := cgroups.IsCgroup2UnifiedMode() - // DELETE ON RHEL9 - if !unified { - _, ok := os.LookupEnv("PODMAN_IGNORE_CGROUPSV1_WARNING") - if !ok { - logrus.Warn("Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.") - } - } - // DELETE ON RHEL9 - - if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) { - // If user is rootless and XDG_RUNTIME_DIR is found, podman will not proceed with /tmp directory - // it will try to use existing XDG_RUNTIME_DIR - // if current user has no write access to XDG_RUNTIME_DIR we will fail later - if err := unix.Access(runtime.storageConfig.RunRoot, unix.W_OK); err != nil { - msg := fmt.Sprintf("RunRoot is pointing to a path (%s) which is not writable. 
Most likely podman will fail.", runtime.storageConfig.RunRoot) - if errors.Is(err, os.ErrNotExist) { - // if dir does not exist, try to create it - if err := os.MkdirAll(runtime.storageConfig.RunRoot, 0700); err != nil { - logrus.Warn(msg) - } - } else { - logrus.Warnf("%s: %v", msg, err) - } - } - } + checkCgroups2UnifiedMode(runtime) logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName) logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot) @@ -464,7 +441,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { } } - return err + return fmt.Errorf("configure storage: %w", err) } defer func() { if retErr != nil && store != nil { @@ -570,7 +547,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { } }() - _, err = os.Stat(runtimeAliveFile) + err = fileutils.Exists(runtimeAliveFile) if err != nil { // If we need to refresh, then it is safe to assume there are // no containers running. Create immediately a namespace, as @@ -642,7 +619,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { // Ensure we have a store before refresh occurs if runtime.store == nil { if err := runtime.configureStore(); err != nil { - return err + return fmt.Errorf("configure storage: %w", err) } } @@ -651,6 +628,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { } } + // Check current boot ID - will be written to the alive file. + if err := runtime.checkBootID(runtimeAliveFile); err != nil { + return err + } + runtime.startWorker() return nil @@ -696,15 +678,16 @@ func (r *Runtime) GetConfig() (*config.Config, error) { // libimageEventsMap translates a libimage event type to a libpod event status. var libimageEventsMap = map[libimage.EventType]events.Status{ - libimage.EventTypeImagePull: events.Pull, - libimage.EventTypeImagePush: events.Push, - libimage.EventTypeImageRemove: events.Remove, - libimage.EventTypeImageLoad: events.LoadFromArchive, - libimage.EventTypeImageSave: events.Save, - libimage.EventTypeImageTag: events.Tag, - libimage.EventTypeImageUntag: events.Untag, - libimage.EventTypeImageMount: events.Mount, - libimage.EventTypeImageUnmount: events.Unmount, + libimage.EventTypeImagePull: events.Pull, + libimage.EventTypeImagePullError: events.PullError, + libimage.EventTypeImagePush: events.Push, + libimage.EventTypeImageRemove: events.Remove, + libimage.EventTypeImageLoad: events.LoadFromArchive, + libimage.EventTypeImageSave: events.Save, + libimage.EventTypeImageTag: events.Tag, + libimage.EventTypeImageUntag: events.Untag, + libimage.EventTypeImageMount: events.Mount, + libimage.EventTypeImageUnmount: events.Unmount, } // libimageEvents spawns a goroutine which will listen for events on @@ -736,6 +719,9 @@ func (r *Runtime) libimageEvents() { Time: libimageEvent.Time, Type: events.Image, } + if libimageEvent.Error != nil { + e.Error = libimageEvent.Error.Error() + } if err := r.eventer.Write(e); err != nil { logrus.Errorf("Unable to write image event: %q", err) } @@ -1266,3 +1252,133 @@ func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) { return toReturn, locksHeld, nil } + +// SystemCheck checks our storage for consistency, and depending on the options +// specified, will attempt to remove anything which fails consistency checks. 
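+// The layer, image, and container errors reported by containers/storage are
+// flattened into string slices on the returned SystemCheckReport. Repairs are
+// only attempted when options.Repair is set (plus options.RepairLossy for
+// containers); anything removed is recorded in the report's
+// RemovedImages/RemovedContainers maps.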
+func (r *Runtime) SystemCheck(ctx context.Context, options entities.SystemCheckOptions) (entities.SystemCheckReport, error) { + what := storage.CheckEverything() + if options.Quick { + what = storage.CheckMost() + } + if options.UnreferencedLayerMaximumAge != nil { + tmp := *options.UnreferencedLayerMaximumAge + what.LayerUnreferencedMaximumAge = &tmp + } + storageReport, err := r.store.Check(what) + if err != nil { + return entities.SystemCheckReport{}, err + } + if len(storageReport.Containers) == 0 && + len(storageReport.Layers) == 0 && + len(storageReport.ROLayers) == 0 && + len(storageReport.Images) == 0 && + len(storageReport.ROImages) == 0 { + // no errors detected + return entities.SystemCheckReport{}, nil + } + mapErrorSlicesToStringSlices := func(m map[string][]error) map[string][]string { + if len(m) == 0 { + return nil + } + mapped := make(map[string][]string, len(m)) + for k, errs := range m { + strs := make([]string, len(errs)) + for i, e := range errs { + strs[i] = e.Error() + } + mapped[k] = strs + } + return mapped + } + + report := entities.SystemCheckReport{ + Errors: true, + Layers: mapErrorSlicesToStringSlices(storageReport.Layers), + ROLayers: mapErrorSlicesToStringSlices(storageReport.ROLayers), + Images: mapErrorSlicesToStringSlices(storageReport.Images), + ROImages: mapErrorSlicesToStringSlices(storageReport.ROImages), + Containers: mapErrorSlicesToStringSlices(storageReport.Containers), + } + if !options.Repair && report.Errors { + // errors detected, no corrective measures to be taken + return report, err + } + + // get a list of images that we knew of before we tried to clean up any + // that were damaged + imagesBefore, err := r.store.Images() + if err != nil { + return report, fmt.Errorf("getting a list of images before attempting repairs: %w", err) + } + + repairOptions := storage.RepairOptions{ + RemoveContainers: options.RepairLossy, + } + var containers []*Container + if repairOptions.RemoveContainers { + // build a list of the containers that we claim as ours that we + // expect to be removing in a bit + for containerID := range storageReport.Containers { + ctr, lookupErr := r.state.LookupContainer(containerID) + if lookupErr != nil { + // we're about to remove it, so it's okay that + // it isn't even one of ours + continue + } + containers = append(containers, ctr) + } + } + + // run the cleanup + merr := multierror.Append(nil, r.store.Repair(storageReport, &repairOptions)...) 
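+ // Repair reports the errors it hit; the rest of this function reconciles
+ // libpod's state database with whatever storage actually removed and records
+ // the removed containers and images in the report.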
+ + if repairOptions.RemoveContainers { + // get the list of containers that storage will still admit to knowing about + containersAfter, err := r.store.Containers() + if err != nil { + merr = multierror.Append(merr, fmt.Errorf("getting a list of containers after attempting repairs: %w", err)) + } + for _, ctr := range containers { + // if one of our containers that we tried to remove is + // still on disk, report an error + if slices.IndexFunc(containersAfter, func(containerAfter storage.Container) bool { + return containerAfter.ID == ctr.ID() + }) != -1 { + merr = multierror.Append(merr, fmt.Errorf("clearing storage for container %s: %w", ctr.ID(), err)) + continue + } + // remove the container from our database + if removeErr := r.state.RemoveContainer(ctr); removeErr != nil { + merr = multierror.Append(merr, fmt.Errorf("updating state database to reflect removal of container %s: %w", ctr.ID(), removeErr)) + continue + } + if report.RemovedContainers == nil { + report.RemovedContainers = make(map[string]string) + } + report.RemovedContainers[ctr.ID()] = ctr.config.Name + } + } + + // get a list of images that are still around after we clean up any + // that were damaged + imagesAfter, err := r.store.Images() + if err != nil { + merr = multierror.Append(merr, fmt.Errorf("getting a list of images after attempting repairs: %w", err)) + } + for _, imageBefore := range imagesBefore { + if slices.IndexFunc(imagesAfter, func(imageAfter storage.Image) bool { + return imageAfter.ID == imageBefore.ID + }) == -1 { + if report.RemovedImages == nil { + report.RemovedImages = make(map[string][]string) + } + report.RemovedImages[imageBefore.ID] = slices.Clone(imageBefore.Names) + } + } + + if merr != nil { + err = merr.ErrorOrNil() + } + + return report, err +} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 9501969d90..8770d572ee 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -10,6 +10,7 @@ import ( "os" "path" "path/filepath" + "slices" "strings" "time" @@ -30,7 +31,6 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // Contains the public Runtime API for containers @@ -585,7 +585,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } if ctr.runtime.config.Engine.EventsContainerCreateInspectData { - if err := ctr.newContainerEventWithInspectData(events.Create, true); err != nil { + if err := ctr.newContainerEventWithInspectData(events.Create, "", true); err != nil { return nil, err } } else { diff --git a/libpod/runtime_freebsd.go b/libpod/runtime_freebsd.go new file mode 100644 index 0000000000..7a9aea6c12 --- /dev/null +++ b/libpod/runtime_freebsd.go @@ -0,0 +1,10 @@ +//go:build !remote + +package libpod + +func checkCgroups2UnifiedMode(runtime *Runtime) { +} + +func (r *Runtime) checkBootID(runtimeAliveFile string) error { + return nil +} diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index db70c4c108..585e1f0217 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -105,7 +105,7 @@ func (r *Runtime) IsExternalContainerCallback(_ context.Context) libimage.IsExte } } -// newBuildEvent creates a new event based on completion of a built image +// newImageBuildCompleteEvent creates a new event based on completion of a built image func (r *Runtime) newImageBuildCompleteEvent(idOrName string) { e := events.NewEvent(events.Build) e.Type = events.Image diff --git a/libpod/runtime_linux.go 
b/libpod/runtime_linux.go new file mode 100644 index 0000000000..065a11959c --- /dev/null +++ b/libpod/runtime_linux.go @@ -0,0 +1,67 @@ +//go:build !remote + +package libpod + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" + + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/systemd" + "github.com/sirupsen/logrus" +) + +func checkCgroups2UnifiedMode(runtime *Runtime) { + unified, _ := cgroups.IsCgroup2UnifiedMode() + // DELETE ON RHEL9 + if !unified { + _, ok := os.LookupEnv("PODMAN_IGNORE_CGROUPSV1_WARNING") + if !ok { + logrus.Warn("Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.") + } + } + // DELETE ON RHEL9 + + if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) { + // If user is rootless and XDG_RUNTIME_DIR is found, podman will not proceed with /tmp directory + // it will try to use existing XDG_RUNTIME_DIR + // if current user has no write access to XDG_RUNTIME_DIR we will fail later + if err := unix.Access(runtime.storageConfig.RunRoot, unix.W_OK); err != nil { + msg := fmt.Sprintf("RunRoot is pointing to a path (%s) which is not writable. Most likely podman will fail.", runtime.storageConfig.RunRoot) + if errors.Is(err, os.ErrNotExist) { + // if dir does not exist, try to create it + if err := os.MkdirAll(runtime.storageConfig.RunRoot, 0700); err != nil { + logrus.Warn(msg) + } + } else { + logrus.Warnf("%s: %v", msg, err) + } + } + } +} + +// Check the current boot ID against the ID cached in the runtime alive file. +func (r *Runtime) checkBootID(runtimeAliveFile string) error { + systemBootID, err := os.ReadFile("/proc/sys/kernel/random/boot_id") + if err == nil { + podmanBootID, err := os.ReadFile(runtimeAliveFile) + if err != nil { + return fmt.Errorf("reading boot ID from runtime alive file: %w", err) + } + if len(podmanBootID) != 0 { + if string(systemBootID) != string(podmanBootID) { + return fmt.Errorf("current system boot ID differs from cached boot ID; an unhandled reboot has occurred. Please delete directories %q and %q and re-run Podman", r.storageConfig.RunRoot, r.config.Engine.TmpDir) + } + } else { + // Write the current boot ID to the alive file. 
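+ // Subsequent runs compare /proc/sys/kernel/random/boot_id against this
+ // cached value (see the check above) to detect an unhandled reboot.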
+ if err := os.WriteFile(runtimeAliveFile, systemBootID, 0644); err != nil { + return fmt.Errorf("writing boot ID to runtime alive file: %w", err) + } + } + } + return nil +} diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go index 86b0780df2..086dba0bdb 100644 --- a/libpod/runtime_pod.go +++ b/libpod/runtime_pod.go @@ -6,10 +6,10 @@ import ( "context" "errors" "fmt" + "slices" "time" "github.com/containers/podman/v5/libpod/define" - "golang.org/x/exp/slices" ) // Contains the public Runtime API for pods diff --git a/libpod/runtime_volume_common.go b/libpod/runtime_volume_common.go index 2f982fce74..44d4f667d7 100644 --- a/libpod/runtime_volume_common.go +++ b/libpod/runtime_volume_common.go @@ -16,6 +16,7 @@ import ( volplugin "github.com/containers/podman/v5/libpod/plugin" "github.com/containers/storage" "github.com/containers/storage/drivers/quota" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/stringid" pluginapi "github.com/docker/go-plugins-helpers/volume" @@ -84,7 +85,7 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti switch strings.ToLower(key) { case "device": if strings.ToLower(volume.config.Options["type"]) == define.TypeBind { - if _, err := os.Stat(val); err != nil { + if err := fileutils.Exists(val); err != nil { return nil, fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err) } } diff --git a/libpod/sqlite_state.go b/libpod/sqlite_state.go index 9a9f793e86..bee2210648 100644 --- a/libpod/sqlite_state.go +++ b/libpod/sqlite_state.go @@ -996,7 +996,7 @@ func (s *SQLiteState) GetContainerExitCodeTimeStamp(id string) (*time.Time, erro return &result, nil } -// PruneExitCodes removes exit codes older than 5 minutes unless the associated +// PruneContainerExitCodes removes exit codes older than 5 minutes unless the associated // container still exists. func (s *SQLiteState) PruneContainerExitCodes() (defErr error) { if !s.valid { @@ -1310,7 +1310,7 @@ func (s *SQLiteState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) } }() - results, err := tx.Exec("UPDATE VolumeConfig SET Name=?, JSON=? WHERE ID=?;", newCfg.Name, json, volume.Name()) + results, err := tx.Exec("UPDATE VolumeConfig SET Name=?, JSON=? 
WHERE Name=?;", newCfg.Name, json, volume.Name()) if err != nil { return fmt.Errorf("updating volume config table with new configuration for volume %s: %w", volume.Name(), err) } diff --git a/libpod/stats_freebsd.go b/libpod/stats_freebsd.go index 9dbb8b12fd..538c8f5023 100644 --- a/libpod/stats_freebsd.go +++ b/libpod/stats_freebsd.go @@ -83,7 +83,7 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev return nil } -// getMemory limit returns the memory limit for a container +// getMemLimit returns the memory limit for a container func (c *Container) getMemLimit() uint64 { memLimit := uint64(math.MaxUint64) diff --git a/libpod/stats_linux.go b/libpod/stats_linux.go index 19ce554cdc..36f861aa3e 100644 --- a/libpod/stats_linux.go +++ b/libpod/stats_linux.go @@ -69,7 +69,7 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev return nil } -// getMemory limit returns the memory limit for a container +// getMemLimit returns the memory limit for a container func (c *Container) getMemLimit(memLimit uint64) uint64 { si := &syscall.Sysinfo_t{} err := syscall.Sysinfo(si) diff --git a/libpod/util.go b/libpod/util.go index 3b30b100d1..195dbeee16 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -20,6 +20,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -104,14 +105,14 @@ func DefaultSeccompPath() (string, error) { return def.Containers.SeccompProfile, nil } - _, err = os.Stat(config.SeccompOverridePath) + err = fileutils.Exists(config.SeccompOverridePath) if err == nil { return config.SeccompOverridePath, nil } if !os.IsNotExist(err) { return "", err } - if _, err := os.Stat(config.SeccompDefaultPath); err != nil { + if err := fileutils.Exists(config.SeccompDefaultPath); err != nil { if !os.IsNotExist(err) { return "", err } @@ -217,7 +218,7 @@ func writeHijackHeader(r *http.Request, conn io.Writer, tty bool) { // Upgraded fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %s\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n", - proto, header) + header, proto) } } @@ -235,8 +236,12 @@ func makeInspectPorts(bindings []types.PortMapping, expose map[uint16][]string) for i := uint16(0); i < port.Range; i++ { key := fmt.Sprintf("%d/%s", port.ContainerPort+i, protocol) hostPorts := portBindings[key] + var hostIP = port.HostIP + if len(port.HostIP) == 0 { + hostIP = "0.0.0.0" + } hostPorts = append(hostPorts, define.InspectHostPort{ - HostIP: port.HostIP, + HostIP: hostIP, HostPort: strconv.FormatUint(uint64(port.HostPort+i), 10), }) portBindings[key] = hostPorts diff --git a/libpod/util_linux.go b/libpod/util_linux.go index 1d83475922..ac83857eed 100644 --- a/libpod/util_linux.go +++ b/libpod/util_linux.go @@ -5,7 +5,6 @@ package libpod import ( "errors" "fmt" - "os" "path/filepath" "strings" "syscall" @@ -13,6 +12,7 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -27,8 +27,7 @@ func cgroupExist(path string) bool { } else { fullPath = 
filepath.Join("/sys/fs/cgroup/memory", path) } - _, err := os.Stat(fullPath) - return err == nil + return fileutils.Exists(fullPath) == nil } // systemdSliceFromPath makes a new systemd slice under the given parent with diff --git a/libpod/util_test.go b/libpod/util_test.go deleted file mode 100644 index baa596f777..0000000000 --- a/libpod/util_test.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !remote - -package libpod - -import ( - "testing" - - "github.com/containers/podman/v5/utils" - "github.com/stretchr/testify/assert" -) - -func TestRemoveScientificNotationFromFloat(t *testing.T) { - numbers := []float64{0.0, .5, 1.99999932, 1.04e+10} - results := []float64{0.0, .5, 1.99999932, 1.04} - for i, x := range numbers { - result, err := utils.RemoveScientificNotationFromFloat(x) - assert.NoError(t, err) - assert.Equal(t, result, results[i]) - } -} diff --git a/libpod/volume.go b/libpod/volume.go index 4b5a224f80..05acb2a6e5 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -98,6 +98,10 @@ type VolumeState struct { // a container, the container will chown the volume to the container process // UID/GID. NeedsChown bool `json:"notYetChowned,omitempty"` + // Indicates that a copy-up event occurred during the current mount of + // the volume into a container. + // We use this to determine if a chown is appropriate. + CopiedUp bool `json:"copiedUp,omitempty"` // UIDChowned is the UID the volume was chowned to. UIDChowned int `json:"uidChowned,omitempty"` // GIDChowned is the GID the volume was chowned to. diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 3d308c8620..e512582305 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -110,4 +110,5 @@ func (v *Volume) refresh() error { func resetVolumeState(state *VolumeState) { state.MountCount = 0 state.MountPoint = "" + state.CopiedUp = false } diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go index 5d3cb992ac..d6db0cf03d 100644 --- a/pkg/annotations/annotations.go +++ b/pkg/annotations/annotations.go @@ -1,108 +1,9 @@ package annotations const ( - // Annotations carries the received Kubelet annotations. - Annotations = "io.kubernetes.cri-o.Annotations" - - // ContainerID is the container ID annotation. - ContainerID = "io.kubernetes.cri-o.ContainerID" - - // ContainerName is the container name annotation. - ContainerName = "io.kubernetes.cri-o.ContainerName" - - // ContainerType is the container type (sandbox or container) annotation. - ContainerType = "io.kubernetes.cri-o.ContainerType" - - // Created is the container creation time annotation. - Created = "io.kubernetes.cri-o.Created" - - // HostName is the container host name annotation. - HostName = "io.kubernetes.cri-o.HostName" - - // CgroupParent is the sandbox cgroup parent. - CgroupParent = "io.kubernetes.cri-o.CgroupParent" - - // IP is the container ipv4 or ipv6 address. - IP = "io.kubernetes.cri-o.IP" - - // NamespaceOptions store the options for namespaces. - NamespaceOptions = "io.kubernetes.cri-o.NamespaceOptions" - - // SeccompProfilePath is the node seccomp profile path. - SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath" - - // Image is the container image ID annotation. - Image = "io.kubernetes.cri-o.Image" - - // ImageName is the container image name annotation. - ImageName = "io.kubernetes.cri-o.ImageName" - - // ImageRef is the container image ref annotation. - ImageRef = "io.kubernetes.cri-o.ImageRef" - - // KubeName is the kubernetes name annotation. 
- KubeName = "io.kubernetes.cri-o.KubeName" - - // PortMappings holds the port mappings for the sandbox. - PortMappings = "io.kubernetes.cri-o.PortMappings" - - // Labels are the kubernetes labels annotation. - Labels = "io.kubernetes.cri-o.Labels" - - // LogPath is the container logging path annotation. - LogPath = "io.kubernetes.cri-o.LogPath" - - // Metadata is the container metadata annotation. - Metadata = "io.kubernetes.cri-o.Metadata" - - // Name is the pod name annotation. - Name = "io.kubernetes.cri-o.Name" - - // Namespace is the pod namespace annotation. - Namespace = "io.kubernetes.cri-o.Namespace" - - // PrivilegedRuntime is the annotation for the privileged runtime path. - PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime" - - // ResolvPath is the resolver configuration path annotation. - ResolvPath = "io.kubernetes.cri-o.ResolvPath" - - // HostnamePath is the path to /etc/hostname to bind mount annotation. - HostnamePath = "io.kubernetes.cri-o.HostnamePath" - // SandboxID is the sandbox ID annotation. SandboxID = "io.kubernetes.cri-o.SandboxID" - // SandboxName is the sandbox name annotation. - SandboxName = "io.kubernetes.cri-o.SandboxName" - - // ShmPath is the shared memory path annotation. - ShmPath = "io.kubernetes.cri-o.ShmPath" - - // MountPoint is the mount point of the container rootfs. - MountPoint = "io.kubernetes.cri-o.MountPoint" - - // RuntimeHandler is the annotation for runtime handler. - RuntimeHandler = "io.kubernetes.cri-o.RuntimeHandler" - - // TTY is the terminal path annotation. - TTY = "io.kubernetes.cri-o.TTY" - - // Stdin is the stdin annotation. - Stdin = "io.kubernetes.cri-o.Stdin" - - // StdinOnce is the stdin_once annotation. - StdinOnce = "io.kubernetes.cri-o.StdinOnce" - - // Volumes is the volumes annotation. - Volumes = "io.kubernetes.cri-o.Volumes" - - // HostNetwork indicates whether the host network namespace is used or not. - HostNetwork = "io.kubernetes.cri-o.HostNetwork" - - // CNIResult is the JSON string representation of the Result from CNI. - CNIResult = "io.kubernetes.cri-o.CNIResult" - // ContainerManager is the annotation key for indicating the creator and // manager of the container. ContainerManager = "io.container.manager" @@ -120,15 +21,3 @@ const ( // ContainerManagerLibpod indicates that libpod created and manages the // container. const ContainerManagerLibpod = "libpod" - -// IsReservedAnnotation returns true if the specified value corresponds to an -// already reserved annotation that Podman sets during container creation. -func IsReservedAnnotation(value string) bool { - switch value { - case Annotations, ContainerID, ContainerName, ContainerType, Created, HostName, CgroupParent, IP, NamespaceOptions, SeccompProfilePath, Image, ImageName, ImageRef, KubeName, PortMappings, Labels, LogPath, Metadata, Name, Namespace, PrivilegedRuntime, ResolvPath, HostnamePath, SandboxID, SandboxName, ShmPath, MountPoint, RuntimeHandler, TTY, Stdin, StdinOnce, Volumes, HostNetwork, CNIResult, ContainerManager: - return true - - default: - return false - } -} diff --git a/pkg/annotations/validate.go b/pkg/annotations/validate.go new file mode 100644 index 0000000000..4ddeea30ed --- /dev/null +++ b/pkg/annotations/validate.go @@ -0,0 +1,124 @@ +package annotations + +import ( + "fmt" + "regexp" + "strings" + + "github.com/containers/podman/v5/libpod/define" +) + +// regexErrorMsg returns a string explanation of a regex validation failure. 
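+// For example, regexErrorMsg("must be lowercase", "[a-z]+", "abc") returns
+// "must be lowercase (e.g. 'abc', regex used for validation is '[a-z]+')".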
+func regexErrorMsg(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "annotations must be formatted as a valid lowercase RFC1123 subdomain of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// isDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). +func isDNS1123Subdomain(value string) error { + if len(value) > DNS1123SubdomainMaxLength { + return fmt.Errorf("prefix part must be no more than %d characters", DNS1123SubdomainMaxLength) + } + + if !dns1123SubdomainRegexp.MatchString(value) { + return fmt.Errorf(regexErrorMsg(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + + return nil +} + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// isQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func isQualifiedName(value string) error { + parts := strings.Split(value, "/") + var name string + + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + return fmt.Errorf("prefix part of %s must be non-empty", value) + } else if err := isDNS1123Subdomain(prefix); err != nil { + return err + } + default: + return fmt.Errorf("a qualified name of %s "+ + regexErrorMsg(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')", value) + } + + if len(name) == 0 { + return fmt.Errorf("name part of %s must be non-empty", value) + } else if len(name) > qualifiedNameMaxLength { + return fmt.Errorf("name part of %s must be no more than %d characters", value, qualifiedNameMaxLength) + } + + if !qualifiedNameRegexp.MatchString(name) { + return fmt.Errorf("name part of %s "+ + regexErrorMsg(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"), value) + } + + return nil +} + +func validateAnnotationsSize(annotations map[string]string) error { + var totalSize int64 + for k, v := range annotations { + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(define.TotalAnnotationSizeLimitB) { + return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, define.TotalAnnotationSizeLimitB) + } + return nil +} + +// ValidateAnnotations validates that a set of annotations are correctly +// defined. +func ValidateAnnotations(annotations map[string]string) error { + for k := range annotations { + // The rule is QualifiedName except that case doesn't matter, + // so convert to lowercase before checking. + if err := isQualifiedName(strings.ToLower(k)); err != nil { + return err + } + } + + if err := validateAnnotationsSize(annotations); err != nil { + return err + } + + return nil +} diff --git a/pkg/annotations/validate_test.go b/pkg/annotations/validate_test.go new file mode 100644 index 0000000000..b29385fcf9 --- /dev/null +++ b/pkg/annotations/validate_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package annotations + +import ( + "strings" + "testing" + + "github.com/containers/podman/v5/libpod/define" +) + +func TestValidateAnnotations(t *testing.T) { + successCases := []map[string]string{ + {"simple": "bar"}, + {"now-with-dashes": "bar"}, + {"1-starts-with-num": "bar"}, + {"1234": "bar"}, + {"simple/simple": "bar"}, + {"now-with-dashes/simple": "bar"}, + {"now-with-dashes/now-with-dashes": "bar"}, + {"now.with.dots/simple": "bar"}, + {"now-with.dashes-and.dots/simple": "bar"}, + {"1-num.2-num/3-num": "bar"}, + {"1234/5678": "bar"}, + {"1.2.3.4/5678": "bar"}, + {"UpperCase123": "bar"}, + {"a": strings.Repeat("b", define.TotalAnnotationSizeLimitB-1)}, + { + "a": strings.Repeat("b", define.TotalAnnotationSizeLimitB/2-1), + "c": strings.Repeat("d", define.TotalAnnotationSizeLimitB/2-1), + }, + } + + for i := range successCases { + if err := ValidateAnnotations(successCases[i]); err != nil { + t.Errorf("case[%d] expected success, got %v", i, err) + } + } + + nameErrorCases := []map[string]string{ + {"nospecialchars^=@": "bar"}, + {"cantendwithadash-": "bar"}, + {"only/one/slash": "bar"}, + {strings.Repeat("a", 254): "bar"}, + } + + for i := range nameErrorCases { + if err := ValidateAnnotations(nameErrorCases[i]); err == nil { + t.Errorf("case[%d]: expected failure", i) + } + } + + totalSizeErrorCases := []map[string]string{ + {"a": strings.Repeat("b", define.TotalAnnotationSizeLimitB)}, + { + "a": strings.Repeat("b", define.TotalAnnotationSizeLimitB/2), + "c": strings.Repeat("d", define.TotalAnnotationSizeLimitB/2), + }, + } + + for i := range totalSizeErrorCases { + if err := ValidateAnnotations(totalSizeErrorCases[i]); err == nil { + t.Errorf("case[%d] expected failure", i) + } + } +} diff --git a/pkg/api/Makefile b/pkg/api/Makefile index 49ebfb422e..747967b1c6 100644 --- a/pkg/api/Makefile +++ b/pkg/api/Makefile @@ -1,13 +1,15 @@ SWAGGER_OUT ?= swagger.yaml +SWAGGER ?= ../../test/tools/build/swagger + validate: ${SWAGGER_OUT} - swagger validate ${SWAGGER_OUT} + $(SWAGGER) validate ${SWAGGER_OUT} serve: ${SWAGGER_OUT} - swagger serve -F redoc -p=8080 swagger.yaml + $(SWAGGER) serve -F redoc -p=8080 swagger.yaml .PHONY: ${SWAGGER_OUT} ${SWAGGER_OUT}: # generate doesn't remove file on error rm -f ${SWAGGER_OUT} - swagger generate spec -x github.com/sigstore/rekor -o ${SWAGGER_OUT} -i tags.yaml -w ./ -m + $(SWAGGER) generate spec -x github.com/sigstore/rekor -o ${SWAGGER_OUT} -i tags.yaml -w ./ -m diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index e72db326b2..08061c1e48 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -28,6 +28,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/go-connections/nat" "github.com/docker/go-units" + spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -661,3 +662,133 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) { utils.WriteResponse(w, http.StatusNoContent, nil) } + +func UpdateContainer(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime) + name := utils.GetName(r) + + ctr, err := runtime.LookupContainer(name) + if err != nil { + utils.ContainerNotFound(w, name, err) + return + } + + options := new(container.UpdateConfig) + if err := json.NewDecoder(r.Body).Decode(options); err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decoding request body: %w", err)) + return + } + + // Only handle the bits 
of update that Docker uses as examples. + // For example, the update API claims to be able to update devices for + // existing containers... Which I am very dubious about. + // Ignore bits like that unless someone asks us for them. + + // We're going to be editing this, so we have to deep-copy to not affect + // the container's own resources + resources := new(spec.LinuxResources) + oldResources := ctr.LinuxResources() + if oldResources != nil { + if err := libpod.JSONDeepCopy(oldResources, resources); err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("copying old resource limits: %w", err)) + return + } + } + + // CPU limits + cpu := resources.CPU + if cpu == nil { + cpu = new(spec.LinuxCPU) + } + useCPU := false + if options.CPUShares != 0 { + shares := uint64(options.CPUShares) + cpu.Shares = &shares + useCPU = true + } + if options.CPUPeriod != 0 { + period := uint64(options.CPUPeriod) + cpu.Period = &period + useCPU = true + } + if options.CPUQuota != 0 { + cpu.Quota = &options.CPUQuota + useCPU = true + } + if options.CPURealtimeRuntime != 0 { + cpu.RealtimeRuntime = &options.CPURealtimeRuntime + useCPU = true + } + if options.CPURealtimePeriod != 0 { + period := uint64(options.CPURealtimePeriod) + cpu.RealtimePeriod = &period + useCPU = true + } + if options.CpusetCpus != "" { + cpu.Cpus = options.CpusetCpus + useCPU = true + } + if options.CpusetMems != "" { + cpu.Mems = options.CpusetMems + useCPU = true + } + if useCPU { + resources.CPU = cpu + } + + // Memory limits + mem := resources.Memory + if mem == nil { + mem = new(spec.LinuxMemory) + } + useMem := false + if options.Memory != 0 { + mem.Limit = &options.Memory + useMem = true + } + if options.MemorySwap != 0 { + mem.Swap = &options.MemorySwap + useMem = true + } + if options.MemoryReservation != 0 { + mem.Reservation = &options.MemoryReservation + useMem = true + } + if useMem { + resources.Memory = mem + } + + // PIDs limit + if options.PidsLimit != nil { + if resources.Pids == nil { + resources.Pids = new(spec.LinuxPids) + } + resources.Pids.Limit = *options.PidsLimit + } + + // Blkio Weight + if options.BlkioWeight != 0 { + if resources.BlockIO == nil { + resources.BlockIO = new(spec.LinuxBlockIO) + } + resources.BlockIO.Weight = &options.BlkioWeight + } + + // Restart policy + localPolicy := string(options.RestartPolicy.Name) + restartPolicy := &localPolicy + + var restartRetries *uint + if options.RestartPolicy.MaximumRetryCount != 0 { + localRetries := uint(options.RestartPolicy.MaximumRetryCount) + restartRetries = &localRetries + } + + if err := ctr.Update(resources, restartPolicy, restartRetries); err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("updating container: %w", err)) + return + } + + responseStruct := container.ContainerUpdateOKBody{} + utils.WriteResponse(w, http.StatusOK, responseStruct) +} diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go index c7579ade9c..cef33d37dd 100644 --- a/pkg/api/handlers/compat/containers_create.go +++ b/pkg/api/handlers/compat/containers_create.go @@ -27,6 +27,7 @@ import ( "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/podman/v5/pkg/specgenutil" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/docker/docker/api/types/mount" ) @@ -373,7 +374,17 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C } } - networks[netName] = netOpts + // Report configuration error in case bridge mode 
is not used. + if !nsmode.IsBridge() && (len(netOpts.Aliases) > 0 || len(netOpts.StaticIPs) > 0 || len(netOpts.StaticMAC) > 0) { + return nil, nil, fmt.Errorf("networks and static ip/mac address can only be used with Bridge mode networking") + } else if nsmode.IsBridge() { + // Docker CLI now always sends the end point config when using the default (bridge) mode + // however podman configuration doesn't expect this to define this at all when not in bridge + // mode and the podman server config might override the default network mode to something + // else than bridge. So adapt to the podman expectation and define custom end point config + // only when really using the bridge mode. + networks[netName] = netOpts + } } netInfo.Networks = networks @@ -441,6 +452,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C ReadOnly: cc.HostConfig.ReadonlyRootfs, ReadWriteTmpFS: true, // podman default Rm: cc.HostConfig.AutoRemove, + Annotation: stringMaptoArray(cc.HostConfig.Annotations), SecurityOpt: cc.HostConfig.SecurityOpt, StopSignal: cc.Config.StopSignal, StopTimeout: rtc.Engine.StopTimeout, // podman default @@ -516,7 +528,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C continue } // If volume already exists, there is nothing to do - if _, err := os.Stat(vol); err == nil { + if err := fileutils.Exists(vol); err == nil { continue } if err := os.MkdirAll(vol, 0o755); err != nil { diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go index ceb1493e03..4ab12eb022 100644 --- a/pkg/api/handlers/compat/events.go +++ b/pkg/api/handlers/compat/events.go @@ -43,7 +43,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { libpodFilters, err := util.FiltersFromRequest(r) if err != nil { - utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err)) + utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse filters for %s: %w", r.URL.String(), err)) return } eventChannel := make(chan *events.Event) @@ -68,8 +68,13 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { } w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - flush() + wroteContent := false + defer func() { + if !wroteContent { + w.WriteHeader(http.StatusOK) + flush() + } + }() coder := json.NewEncoder(w) coder.SetEscapeHTML(true) @@ -78,8 +83,8 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { select { case err := <-errorChannel: if err != nil { - // FIXME StatusOK already sent above cannot send 500 here utils.InternalServerError(w, err) + wroteContent = true } return case evt := <-eventChannel: @@ -103,6 +108,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { if err := coder.Encode(e); err != nil { logrus.Errorf("Unable to write json: %q", err) } + wroteContent = true flush() case <-r.Context().Done(): return diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go index 238277a7d5..8dde5f1f61 100644 --- a/pkg/api/handlers/compat/images.go +++ b/pkg/api/handlers/compat/images.go @@ -483,7 +483,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) { imageEngine := abi.ImageEngine{Libpod: runtime} - listOptions := entities.ImageListOptions{All: query.All, Filter: filterList} + listOptions := entities.ImageListOptions{All: query.All, Filter: filterList, ExtendedAttributes: utils.IsLibpodRequest(r)} summaries, err := imageEngine.List(r.Context(), listOptions) if err != nil { utils.Error(w, 
http.StatusInternalServerError, err) diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go index 93a1100c38..19a5338791 100644 --- a/pkg/api/handlers/compat/images_build.go +++ b/pkg/api/handlers/compat/images_build.go @@ -27,6 +27,7 @@ import ( "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" "github.com/docker/docker/pkg/jsonmessage" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -50,7 +51,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } - contextDirectory, err := extractTarFile(r) + anchorDir, err := os.MkdirTemp("", "libpod_builder") if err != nil { utils.InternalServerError(w, err) return @@ -64,12 +65,18 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } } - err := os.RemoveAll(filepath.Dir(contextDirectory)) + err := os.RemoveAll(anchorDir) if err != nil { - logrus.Warn(fmt.Errorf("failed to remove build scratch directory %q: %w", filepath.Dir(contextDirectory), err)) + logrus.Warn(fmt.Errorf("failed to remove build scratch directory %q: %w", anchorDir, err)) } }() + contextDirectory, err := extractTarFile(anchorDir, r) + if err != nil { + utils.InternalServerError(w, err) + return + } + query := struct { AddHosts string `schema:"extrahosts"` AdditionalCapabilities string `schema:"addcaps"` @@ -242,9 +249,9 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { containerFiles = []string{filepath.Join(contextDirectory, "Dockerfile")} if utils.IsLibpodRequest(r) { containerFiles = []string{filepath.Join(contextDirectory, "Containerfile")} - if _, err = os.Stat(containerFiles[0]); err != nil { + if err = fileutils.Exists(containerFiles[0]); err != nil { containerFiles = []string{filepath.Join(contextDirectory, "Dockerfile")} - if _, err1 := os.Stat(containerFiles[0]); err1 != nil { + if err1 := fileutils.Exists(containerFiles[0]); err1 != nil { utils.BadRequest(w, "dockerfile", query.Dockerfile, err) return } @@ -377,10 +384,19 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { return } - // make sure to force rootless as rootless otherwise buildah runs code which is intended to be run only as root. - if isolation == buildah.IsolationOCI && rootless.IsRootless() { - isolation = buildah.IsolationOCIRootless + // Make sure to force rootless as rootless otherwise buildah runs code which is intended to be run only as root. 
+ // The same applies the other way around: https://github.com/containers/podman/issues/22109 + switch isolation { + case buildah.IsolationOCI: + if rootless.IsRootless() { + isolation = buildah.IsolationOCIRootless + } + case buildah.IsolationOCIRootless: + if !rootless.IsRootless() { + isolation = buildah.IsolationOCI + } } + registry = "" format = query.OutputFormat } else { @@ -722,7 +738,12 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { UnsetLabels: query.UnsetLabels, } - for _, platformSpec := range query.Platform { + platforms := query.Platform + if len(platforms) == 1 { + // Docker API uses comma separated platform arg so match this here + platforms = strings.Split(query.Platform[0], ",") + } + for _, platformSpec := range platforms { os, arch, variant, err := parse.Platform(platformSpec) if err != nil { utils.BadRequest(w, "platform", platformSpec, err) @@ -884,33 +905,13 @@ func parseLibPodIsolation(isolation string) (buildah.Isolation, error) { return parse.IsolationOption(isolation) } -func extractTarFile(r *http.Request) (string, error) { - // build a home for the request body - anchorDir, err := os.MkdirTemp("", "libpod_builder") - if err != nil { - return "", err - } - - path := filepath.Join(anchorDir, "tarBall") - tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600) - if err != nil { - return "", err - } - defer tarBall.Close() - - // Content-Length not used as too many existing API clients didn't honor it - _, err = io.Copy(tarBall, r.Body) - if err != nil { - return "", fmt.Errorf("failed Request: Unable to copy tar file from request body %s", r.RequestURI) - } - +func extractTarFile(anchorDir string, r *http.Request) (string, error) { buildDir := filepath.Join(anchorDir, "build") - err = os.Mkdir(buildDir, 0o700) + err := os.Mkdir(buildDir, 0o700) if err != nil { return "", err } - _, _ = tarBall.Seek(0, 0) - err = archive.Untar(tarBall, buildDir, nil) + err = archive.Untar(r.Body, buildDir, nil) return buildDir, err } diff --git a/pkg/api/handlers/compat/info.go b/pkg/api/handlers/compat/info.go index 94b7bdfdbd..d4c2f5f649 100644 --- a/pkg/api/handlers/compat/info.go +++ b/pkg/api/handlers/compat/info.go @@ -51,7 +51,6 @@ func GetInfo(w http.ResponseWriter, r *http.Request) { // FIXME: Need to expose if runtime supports Checkpointing // liveRestoreEnabled := criu.CheckForCriu() && configInfo.RuntimeSupportsCheckpoint() - info := &handlers.Info{ Info: dockerSystem.Info{ Architecture: goRuntime.GOARCH, @@ -193,6 +192,9 @@ func getSecOpts(sysInfo *sysinfo.SysInfo) []string { func getRuntimes(configInfo *config.Config) map[string]dockerSystem.RuntimeWithStatus { runtimes := map[string]dockerSystem.RuntimeWithStatus{} for name, paths := range configInfo.Engine.OCIRuntimes { + if len(paths) == 0 { + continue + } runtime := dockerSystem.RuntimeWithStatus{} runtime.Runtime = dockerSystem.Runtime{Path: paths[0], Args: nil} runtimes[name] = runtime diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go index a727225692..b9130a5a7f 100644 --- a/pkg/api/handlers/compat/networks.go +++ b/pkg/api/handlers/compat/networks.go @@ -17,41 +17,12 @@ import ( "github.com/containers/podman/v5/pkg/domain/infra/abi" "github.com/containers/podman/v5/pkg/util" "github.com/docker/docker/api/types" + "golang.org/x/exp/maps" dockerNetwork "github.com/docker/docker/api/types/network" "github.com/sirupsen/logrus" ) -type containerNetStatus struct { - name string - id string - status map[string]nettypes.StatusBlock -} - -func
getContainerNetStatuses(rt *libpod.Runtime) ([]containerNetStatus, error) { - cons, err := rt.GetAllContainers() - if err != nil { - return nil, err - } - statuses := make([]containerNetStatus, 0, len(cons)) - for _, con := range cons { - status, err := con.GetNetworkStatus() - if err != nil { - if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { - continue - } - return nil, err - } - - statuses = append(statuses, containerNetStatus{ - id: con.ID(), - name: con.Name(), - status: status, - }) - } - return statuses, nil -} - func normalizeNetworkName(rt *libpod.Runtime, name string) (string, bool) { if name == nettypes.BridgeNetworkDriver { return rt.Network().DefaultNetworkName(), true @@ -86,7 +57,8 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) { utils.NetworkNotFound(w, name, err) return } - statuses, err := getContainerNetStatuses(runtime) + ic := abi.ContainerEngine{Libpod: runtime} + statuses, err := ic.GetContainerNetStatuses() if err != nil { utils.InternalServerError(w, err) return @@ -95,10 +67,10 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) { utils.WriteResponse(w, http.StatusOK, report) } -func convertLibpodNetworktoDockerNetwork(runtime *libpod.Runtime, statuses []containerNetStatus, network *nettypes.Network, changeDefaultName bool) *types.NetworkResource { +func convertLibpodNetworktoDockerNetwork(runtime *libpod.Runtime, statuses []abi.ContainerNetStatus, network *nettypes.Network, changeDefaultName bool) *types.NetworkResource { containerEndpoints := make(map[string]types.EndpointResource, len(statuses)) for _, st := range statuses { - if netData, ok := st.status[network.Name]; ok { + if netData, ok := st.Status[network.Name]; ok { ipv4Address := "" ipv6Address := "" macAddr := "" @@ -116,12 +88,12 @@ func convertLibpodNetworktoDockerNetwork(runtime *libpod.Runtime, statuses []con break } containerEndpoint := types.EndpointResource{ - Name: st.name, + Name: st.Name, MacAddress: macAddr, IPv4Address: ipv4Address, IPv6Address: ipv6Address, } - containerEndpoints[st.id] = containerEndpoint + containerEndpoints[st.ID] = containerEndpoint } } ipamConfigs := make([]dockerNetwork.IPAMConfig, 0, len(network.Subnets)) @@ -147,7 +119,9 @@ func convertLibpodNetworktoDockerNetwork(runtime *libpod.Runtime, statuses []con if changeDefaultName && name == runtime.Network().DefaultNetworkName() { name = nettypes.BridgeNetworkDriver } - options := network.Options + // Make sure to clone the map as we have access to the map stored in + // the network backend and will overwrite it which is not good. 
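+ // (the delete of the isolate option just below would otherwise mutate the
+ // network definition held by the backend)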
+ options := maps.Clone(network.Options) // bridge always has isolate set in the compat API but we should not return it to not confuse callers // https://github.com/containers/podman/issues/15580 delete(options, nettypes.IsolateOption) @@ -192,7 +166,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - statuses, err := getContainerNetStatuses(runtime) + statuses, err := ic.GetContainerNetStatuses() if err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go index e823cf0608..0dabd649bb 100644 --- a/pkg/api/handlers/libpod/containers.go +++ b/pkg/api/handlers/libpod/containers.go @@ -144,6 +144,11 @@ func GetContainer(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } + // if client request old v4 payload we should return v4 compatible json + if _, err := utils.SupportedVersion(r, ">=5.0.0"); err != nil { + data.Config.V4PodmanCompatMarshal = true + } + utils.WriteResponse(w, http.StatusOK, data) } @@ -402,18 +407,46 @@ func InitContainer(w http.ResponseWriter, r *http.Request) { func UpdateContainer(w http.ResponseWriter, r *http.Request) { name := utils.GetName(r) runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime) + decoder := utils.GetDecoder(r) + query := struct { + RestartPolicy string `schema:"restartPolicy"` + RestartRetries uint `schema:"restartRetries"` + }{ + // override any golang type defaults + } + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err)) + return + } + ctr, err := runtime.LookupContainer(name) if err != nil { utils.ContainerNotFound(w, name, err) return } + var restartPolicy *string + var restartRetries *uint + if query.RestartPolicy != "" { + restartPolicy = &query.RestartPolicy + if query.RestartPolicy == define.RestartPolicyOnFailure { + restartRetries = &query.RestartRetries + } else if query.RestartRetries != 0 { + utils.Error(w, http.StatusBadRequest, errors.New("cannot set restart retries unless restart policy is on-failure")) + return + } + } else if query.RestartRetries != 0 { + utils.Error(w, http.StatusBadRequest, errors.New("cannot set restart retries unless restart policy is set")) + return + } + options := &handlers.UpdateEntities{Resources: &specs.LinuxResources{}} if err := json.NewDecoder(r.Body).Decode(&options.Resources); err != nil { utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decode(): %w", err)) return } - err = ctr.Update(options.Resources) + err = ctr.Update(options.Resources, restartPolicy, restartRetries) if err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/libpod/containers_stats.go b/pkg/api/handlers/libpod/containers_stats.go index 41b0bfbaf7..2ed2006c91 100644 --- a/pkg/api/handlers/libpod/containers_stats.go +++ b/pkg/api/handlers/libpod/containers_stats.go @@ -62,18 +62,23 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) { return } - // Write header and content type. - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - + wroteContent := false // Set up JSON encoder for streaming. 
coder := json.NewEncoder(w) coder.SetEscapeHTML(true) for stats := range statsChan { + if !wroteContent { + if stats.Error != nil { + utils.ContainerNotFound(w, "", stats.Error) + return + } + // Write header and content type. + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + wroteContent = true + } + if err := coder.Encode(stats); err != nil { // Note: even when streaming, the stats goroutine will // be notified (and stop) as the connection will be diff --git a/pkg/api/handlers/libpod/images_push.go b/pkg/api/handlers/libpod/images_push.go index 8caf23ef36..ce9d5d19e1 100644 --- a/pkg/api/handlers/libpod/images_push.go +++ b/pkg/api/handlers/libpod/images_push.go @@ -32,6 +32,8 @@ func PushImage(w http.ResponseWriter, r *http.Request) { Destination string `schema:"destination"` Format string `schema:"format"` RemoveSignatures bool `schema:"removeSignatures"` + Retry uint `schema:"retry"` + RetryDelay string `schema:"retryDelay"` TLSVerify bool `schema:"tlsVerify"` Quiet bool `schema:"quiet"` }{ @@ -83,9 +85,14 @@ func PushImage(w http.ResponseWriter, r *http.Request) { Password: password, Quiet: query.Quiet, RemoveSignatures: query.RemoveSignatures, + RetryDelay: query.RetryDelay, Username: username, } + if _, found := r.URL.Query()["retry"]; found { + options.Retry = &query.Retry + } + if _, found := r.URL.Query()["compressionFormat"]; found { if _, foundForceCompression := r.URL.Query()["forceCompressionFormat"]; !foundForceCompression { // If `compressionFormat` is set and no value for `forceCompressionFormat` diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go index 4d28cdd9ec..ee97aa1f12 100644 --- a/pkg/api/handlers/libpod/manifests.go +++ b/pkg/api/handlers/libpod/manifests.go @@ -8,8 +8,12 @@ import ( "io" "net/http" "net/url" + "os" + "path" + "path/filepath" "strconv" "strings" + "sync" "github.com/containers/common/libimage/define" "github.com/containers/image/v5/docker/reference" @@ -23,22 +27,24 @@ import ( "github.com/containers/podman/v5/pkg/channel" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/domain/infra/abi" - envLib "github.com/containers/podman/v5/pkg/env" "github.com/containers/podman/v5/pkg/errorhandling" "github.com/gorilla/mux" "github.com/gorilla/schema" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) func ManifestCreate(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime) decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder) query := struct { - Name string `schema:"name"` - Images []string `schema:"images"` - All bool `schema:"all"` - Amend bool `schema:"amend"` + Name string `schema:"name"` + Images []string `schema:"images"` + All bool `schema:"all"` + Amend bool `schema:"amend"` + Annotation []string `schema:"annotation"` + Annotations map[string]string `schema:"annotations"` }{ // Add defaults here once needed. 
} @@ -73,7 +79,21 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) { imageEngine := abi.ImageEngine{Libpod: runtime} - createOptions := entities.ManifestCreateOptions{All: query.All, Amend: query.Amend} + annotations := maps.Clone(query.Annotations) + for _, annotation := range query.Annotation { + k, v, ok := strings.Cut(annotation, "=") + if !ok { + utils.Error(w, http.StatusBadRequest, + fmt.Errorf("invalid annotation %s", annotation)) + return + } + if annotations == nil { + annotations = make(map[string]string) + } + annotations[k] = v + } + + createOptions := entities.ManifestCreateOptions{All: query.All, Amend: query.Amend, Annotations: annotations} manID, err := imageEngine.ManifestCreate(r.Context(), query.Name, query.Images, createOptions) if err != nil { utils.InternalServerError(w, err) @@ -99,26 +119,29 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) { body := new(entities.ManifestModifyOptions) if err := json.Unmarshal(buffer, body); err != nil { - utils.InternalServerError(w, fmt.Errorf("Decode(): %w", err)) + utils.InternalServerError(w, fmt.Errorf("decoding modifications in request: %w", err)) return } - // gather all images for manifest list - var images []string - if len(query.Images) > 0 { - images = query.Images + if len(body.IndexAnnotation) != 0 || len(body.IndexAnnotations) != 0 || body.IndexSubject != "" { + manifestAnnotateOptions := entities.ManifestAnnotateOptions{ + IndexAnnotation: body.IndexAnnotation, + IndexAnnotations: body.IndexAnnotations, + IndexSubject: body.IndexSubject, + } + if _, err := imageEngine.ManifestAnnotate(r.Context(), manID, "", manifestAnnotateOptions); err != nil { + utils.InternalServerError(w, err) + return + } } if len(body.Images) > 0 { - images = body.Images - } - - id, err := imageEngine.ManifestAdd(r.Context(), query.Name, images, body.ManifestAddOptions) - if err != nil { - utils.InternalServerError(w, err) - return + if _, err := imageEngine.ManifestAdd(r.Context(), manID, body.Images, body.ManifestAddOptions); err != nil { + utils.InternalServerError(w, err) + return + } } - utils.WriteResponse(w, status, entities.IDResponse{ID: id}) + utils.WriteResponse(w, status, entities.IDResponse{ID: manID}) } // ManifestExists return true if manifest list exists. 
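The create handler above accepts index annotations either as repeated annotation=key=value query parameters or as an annotations map. A minimal client-side sketch of driving it through the Go bindings follows; the CreateOptions.Annotation field and its WithAnnotation setter are added later in this patch, while the manifests.Create call signature and the socket path are assumptions taken from the existing bindings API rather than from this diff:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/manifests"
)

func main() {
	// Assumption: a Podman service is listening on the default rootless socket.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/user/1000/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// Annotation is the new map added by this change; its entries end up as
	// index annotations on the freshly created manifest list.
	opts := new(manifests.CreateOptions).
		WithAnnotation(map[string]string{"org.example.purpose": "demo"})
	id, err := manifests.Create(ctx, "localhost/demo-list:latest", nil, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("created manifest list", id)
}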
@@ -194,7 +217,7 @@ func ManifestAddV3(w http.ResponseWriter, r *http.Request) { TLSVerify bool `schema:"tlsVerify"` }{} if err := json.NewDecoder(r.Body).Decode(&query); err != nil { - utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err)) + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decoding AddV3 query: %w", err)) return } @@ -471,34 +494,138 @@ func ManifestModify(w http.ResponseWriter, r *http.Request) { imageEngine := abi.ImageEngine{Libpod: runtime} body := new(entities.ManifestModifyOptions) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err)) - return + + multireader, err := r.MultipartReader() + if err != nil { + multireader = nil + // not multipart - request is just encoded JSON, nothing else + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decoding modify request: %w", err)) + return + } + } else { + // multipart - request is encoded JSON in the first part, each artifact is its own part + bodyPart, err := multireader.NextPart() + if err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("reading first part of multipart request: %w", err)) + return + } + err = json.NewDecoder(bodyPart).Decode(body) + bodyPart.Close() + if err != nil { + utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decoding modify request in multipart request: %w", err)) + return + } } name := utils.GetName(r) - if _, err := runtime.LibimageRuntime().LookupManifestList(name); err != nil { + manifestList, err := runtime.LibimageRuntime().LookupManifestList(name) + if err != nil { utils.Error(w, http.StatusNotFound, err) return } - if len(body.ManifestAddOptions.Annotation) != 0 { - if len(body.ManifestAddOptions.Annotations) != 0 { - utils.Error(w, http.StatusBadRequest, fmt.Errorf("can not set both Annotation and Annotations")) - return - } + annotationsFromAnnotationSlice := func(annotation []string) map[string]string { annotations := make(map[string]string) - for _, annotationSpec := range body.ManifestAddOptions.Annotation { + for _, annotationSpec := range annotation { key, val, hasVal := strings.Cut(annotationSpec, "=") if !hasVal { utils.Error(w, http.StatusBadRequest, fmt.Errorf("no value given for annotation %q", key)) - return + return nil } annotations[key] = val } - body.ManifestAddOptions.Annotations = envLib.Join(body.ManifestAddOptions.Annotations, annotations) + return annotations + } + if len(body.ManifestAddOptions.Annotation) != 0 { + if len(body.ManifestAddOptions.Annotations) != 0 { + utils.Error(w, http.StatusBadRequest, fmt.Errorf("can not set both Annotation and Annotations")) + return + } + body.ManifestAddOptions.Annotations = annotationsFromAnnotationSlice(body.ManifestAddOptions.Annotation) body.ManifestAddOptions.Annotation = nil } + if len(body.ManifestAddOptions.IndexAnnotation) != 0 { + if len(body.ManifestAddOptions.IndexAnnotations) != 0 { + utils.Error(w, http.StatusBadRequest, fmt.Errorf("can not set both IndexAnnotation and IndexAnnotations")) + return + } + body.ManifestAddOptions.IndexAnnotations = annotationsFromAnnotationSlice(body.ManifestAddOptions.IndexAnnotation) + body.ManifestAddOptions.IndexAnnotation = nil + } + + var artifactExtractionError error + var artifactExtraction sync.WaitGroup + if multireader != nil { + // If the data was multipart, then save items from it into a + // directory that will be removed along with this list, 
+ // whenever that happens. + artifactExtraction.Add(1) + go func() { + defer artifactExtraction.Done() + storageConfig := runtime.StorageConfig() + // FIXME: knowing that this is the location of the + // per-image-record-stuff directory is a little too + // "inside storage" + fileDir, err := os.MkdirTemp(filepath.Join(runtime.GraphRoot(), storageConfig.GraphDriverName+"-images", manifestList.ID()), "") + if err != nil { + artifactExtractionError = err + return + } + // We'll be building a list of the names of files we + // received as part of the request and setting it in + // the request body before we're done. + var contentFiles []string + part, err := multireader.NextPart() + if err != nil { + artifactExtractionError = err + return + } + for part != nil { + partName := part.FormName() + if filename := part.FileName(); filename != "" { + partName = filename + } + if partName != "" { + partName = path.Base(partName) + } + // Write the file in a scope that lets us close it as quickly + // as possible. + if err = func() error { + defer part.Close() + var f *os.File + // Create the file. + if partName != "" { + // Try to use the supplied name. + f, err = os.OpenFile(filepath.Join(fileDir, partName), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600) + } else { + // No supplied name means they don't care. + f, err = os.CreateTemp(fileDir, "upload") + } + if err != nil { + return err + } + defer f.Close() + // Write the file's contents. + if _, err = io.Copy(f, part); err != nil { + return err + } + contentFiles = append(contentFiles, f.Name()) + return nil + }(); err != nil { + break + } + part, err = multireader.NextPart() + } + // If we stowed all of the uploaded files without issue, we're all good. + if err != nil && !errors.Is(err, io.EOF) { + artifactExtractionError = err + return + } + // Save the list of files that we created. + body.ArtifactFiles = contentFiles + }() + } if tlsVerify, ok := r.URL.Query()["tlsVerify"]; ok { tls, err := strconv.ParseBool(tlsVerify[len(tlsVerify)-1]) @@ -527,17 +654,50 @@ func ManifestModify(w http.ResponseWriter, r *http.Request) { body.ManifestAddOptions.CertDir = sys.DockerCertPath } - var report entities.ManifestModifyReport + report := entities.ManifestModifyReport{ID: manifestList.ID()} switch { case strings.EqualFold("update", body.Operation): - id, err := imageEngine.ManifestAdd(r.Context(), name, body.Images, body.ManifestAddOptions) - if err != nil { - report.Errors = append(report.Errors, err) - break + if len(body.Images) > 0 { + id, err := imageEngine.ManifestAdd(r.Context(), name, body.Images, body.ManifestAddOptions) + if err != nil { + report.Errors = append(report.Errors, err) + break + } + report.ID = id + report.Images = body.Images } - report = entities.ManifestModifyReport{ - ID: id, - Images: body.Images, + if multireader != nil { + // Wait for the extraction goroutine to finish + // processing the message in the request body, so that + // we know whether or not everything looked alright. + artifactExtraction.Wait() + if artifactExtractionError != nil { + report.Errors = append(report.Errors, artifactExtractionError) + artifactExtractionError = nil + break + } + // Reconstruct a ManifestAddArtifactOptions from the corresponding + // fields in the entities.ManifestModifyOptions that we decoded + // the request struct into and then supplemented with the files list. + // We waited until after the extraction goroutine finished to ensure + // that we'd pick up its changes to the ArtifactFiles list. 
+ manifestAddArtifactOptions := entities.ManifestAddArtifactOptions{ + Type: body.ArtifactType, + LayerType: body.ArtifactLayerType, + ConfigType: body.ArtifactConfigType, + Config: body.ArtifactConfig, + ExcludeTitles: body.ArtifactExcludeTitles, + Annotations: body.ArtifactAnnotations, + Subject: body.ArtifactSubject, + Files: body.ArtifactFiles, + } + id, err := imageEngine.ManifestAddArtifact(r.Context(), name, body.ArtifactFiles, manifestAddArtifactOptions) + if err != nil { + report.Errors = append(report.Errors, err) + break + } + report.ID = id + report.Files = body.ArtifactFiles } case strings.EqualFold("remove", body.Operation): for _, image := range body.Images { @@ -550,15 +710,7 @@ func ManifestModify(w http.ResponseWriter, r *http.Request) { report.Images = append(report.Images, image) } case strings.EqualFold("annotate", body.Operation): - options := entities.ManifestAnnotateOptions{ - Annotations: body.Annotations, - Arch: body.Arch, - Features: body.Features, - OS: body.OS, - OSFeatures: body.OSFeatures, - OSVersion: body.OSVersion, - Variant: body.Variant, - } + options := body.ManifestAnnotateOptions for _, image := range body.Images { id, err := imageEngine.ManifestAnnotate(r.Context(), name, image, options) if err != nil { @@ -573,6 +725,13 @@ func ManifestModify(w http.ResponseWriter, r *http.Request) { return } + // In case something weird happened, don't just let the goroutine go; make the + // client at least wait for it. + artifactExtraction.Wait() + if artifactExtractionError != nil { + report.Errors = append(report.Errors, artifactExtractionError) + } + statusCode := http.StatusOK switch { case len(report.Errors) > 0 && len(report.Images) > 0: diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go index 25da4beaa8..8bc081f160 100644 --- a/pkg/api/handlers/libpod/pods.go +++ b/pkg/api/handlers/libpod/pods.go @@ -177,7 +177,10 @@ func PodStop(w http.ResponseWriter, r *http.Request) { } } - report := entities.PodStopReport{Id: pod.ID()} + report := entities.PodStopReport{ + Id: pod.ID(), + RawInput: pod.Name(), + } for id, err := range responses { report.Errs = append(report.Errs, fmt.Errorf("stopping container %s: %w", id, err)) } @@ -199,7 +202,7 @@ func PodStart(w http.ResponseWriter, r *http.Request) { } status, err := pod.GetPodStatus() if err != nil { - utils.Error(w, http.StatusInternalServerError, err) + utils.InternalServerError(w, err) return } if status == define.PodStateRunning { @@ -209,11 +212,19 @@ func PodStart(w http.ResponseWriter, r *http.Request) { responses, err := pod.Start(r.Context()) if err != nil && !errors.Is(err, define.ErrPodPartialFail) { - utils.Error(w, http.StatusConflict, err) + utils.InternalServerError(w, err) return } - report := entities.PodStartReport{Id: pod.ID()} + cfg, err := pod.Config() + if err != nil { + utils.InternalServerError(w, err) + return + } + report := entities.PodStartReport{ + Id: pod.ID(), + RawInput: cfg.Name, + } for id, err := range responses { report.Errs = append(report.Errs, fmt.Errorf("%v: %w", "starting container "+id, err)) } @@ -400,12 +411,8 @@ func PodTop(w http.ResponseWriter, r *http.Request) { return } - // We are committed now - all errors logged but not reported to client, ship has sailed - w.WriteHeader(http.StatusOK) + wroteContent := false w.Header().Set("Content-Type", "application/json") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } encoder := json.NewEncoder(w) @@ -417,11 +424,22 @@ loop: // break out of for/select infinite` loop default: output, err := 
pod.GetPodPidInformation([]string{query.PsArgs})
 			if err != nil {
-				logrus.Infof("Error from %s %q : %v", r.Method, r.URL, err)
-				break loop
+				if !wroteContent {
+					utils.InternalServerError(w, err)
+				} else {
+					// The ship has sailed: the client already got a 200 response and expects
+					// valid PodTopOKBody JSON, so we can no longer send the error.
+					logrus.Infof("Error from %s %q : %v", r.Method, r.URL, err)
+				}
+				return
 			}

 			if len(output) > 0 {
+				if !wroteContent {
+					// Write header only first time around
+					w.WriteHeader(http.StatusOK)
+					wroteContent = true
+				}
 				body := handlers.PodTopOKBody{}
 				body.Titles = utils.PSTitles(output[0])
 				for i := range body.Titles {
@@ -559,14 +577,13 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
 		return
 	}
-	var flush = func() {}
+	flush := func() {}
 	if flusher, ok := w.(http.Flusher); ok {
 		flush = flusher.Flush
 	}

 	// Collect the stats and send them over the wire.
 	containerEngine := abi.ContainerEngine{Libpod: runtime}
 	reports, err := containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
-
 	// Error checks as documented in swagger.
 	if err != nil {
 		if errors.Is(err, define.ErrNoSuchPod) {
diff --git a/pkg/api/handlers/libpod/swagger_spec.go b/pkg/api/handlers/libpod/swagger_spec.go
index 3686c8c9fc..3ba2cb4d35 100644
--- a/pkg/api/handlers/libpod/swagger_spec.go
+++ b/pkg/api/handlers/libpod/swagger_spec.go
@@ -7,6 +7,7 @@ import (
 	"os"

 	"github.com/containers/podman/v5/pkg/api/handlers/utils"
+	"github.com/containers/storage/pkg/fileutils"
 )

 // DefaultPodmanSwaggerSpec provides the default path to the podman swagger spec file
@@ -17,7 +18,7 @@ func ServeSwagger(w http.ResponseWriter, r *http.Request) {
 	if p, found := os.LookupEnv("PODMAN_SWAGGER_SPEC"); found {
 		path = p
 	}
-	if _, err := os.Stat(path); err != nil {
+	if err := fileutils.Exists(path); err != nil {
 		if errors.Is(err, os.ErrNotExist) {
 			utils.InternalServerError(w, fmt.Errorf("swagger spec %q does not exist", path))
 			return
diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go
index 8e9f939588..c6444fd901 100644
--- a/pkg/api/handlers/libpod/system.go
+++ b/pkg/api/handlers/libpod/system.go
@@ -3,6 +3,7 @@ package libpod
 import (
 	"fmt"
 	"net/http"
+	"time"

 	"github.com/containers/podman/v5/libpod"
 	"github.com/containers/podman/v5/pkg/api/handlers/utils"
@@ -65,3 +66,47 @@ func DiskUsage(w http.ResponseWriter, r *http.Request) {
 	}
 	utils.WriteResponse(w, http.StatusOK, response)
 }
+
+func SystemCheck(w http.ResponseWriter, r *http.Request) {
+	decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
+	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
+
+	query := struct {
+		Quick                       bool   `schema:"quick"`
+		Repair                      bool   `schema:"repair"`
+		RepairLossy                 bool   `schema:"repair_lossy"`
+		UnreferencedLayerMaximumAge string `schema:"unreferenced_layer_max_age"`
+	}{}
+
+	if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+		utils.Error(w, http.StatusBadRequest,
+			fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
+		return
+	}
+
+	containerEngine := abi.ContainerEngine{Libpod: runtime}
+
+	var unreferencedLayerMaximumAge *time.Duration
+	if query.UnreferencedLayerMaximumAge != "" {
+		duration, err := time.ParseDuration(query.UnreferencedLayerMaximumAge)
+		if err != nil {
+			utils.Error(w, http.StatusBadRequest,
+				fmt.Errorf("failed to parse unreferenced_layer_max_age parameter %q for %s: %w", query.UnreferencedLayerMaximumAge, r.URL.String(), err))
+			return
+		}
+		unreferencedLayerMaximumAge = &duration
+	}
+	checkOptions := 
entities.SystemCheckOptions{ + Quick: query.Quick, + Repair: query.Repair, + RepairLossy: query.RepairLossy, + UnreferencedLayerMaximumAge: unreferencedLayerMaximumAge, + } + report, err := containerEngine.SystemCheck(r.Context(), checkOptions) + if err != nil { + utils.InternalServerError(w, err) + return + } + + utils.WriteResponse(w, http.StatusOK, report) +} diff --git a/pkg/api/handlers/swagger/models.go b/pkg/api/handlers/swagger/models.go index a7b561df97..a58a6bd95a 100644 --- a/pkg/api/handlers/swagger/models.go +++ b/pkg/api/handlers/swagger/models.go @@ -4,6 +4,7 @@ package swagger import ( "github.com/containers/podman/v5/pkg/domain/entities" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // Details for creating a volume @@ -48,3 +49,7 @@ type networkConnectRequestLibpod entities.NetworkConnectOptions // Network update // swagger:model type networkUpdateRequestLibpod entities.NetworkUpdateOptions + +// Container update +// swagger:model +type containerUpdateRequest container.UpdateConfig diff --git a/pkg/api/handlers/swagger/responses.go b/pkg/api/handlers/swagger/responses.go index ee8704dea8..c41a83a38e 100644 --- a/pkg/api/handlers/swagger/responses.go +++ b/pkg/api/handlers/swagger/responses.go @@ -188,6 +188,13 @@ type versionResponse struct { Body entities.ComponentVersion } +// Check +// swagger:response +type systemCheckResponse struct { + // in:body + Body entities.SystemCheckReport +} + // Disk usage // swagger:response type systemDiskUsage struct { @@ -434,7 +441,7 @@ type networkRmResponse struct { // swagger:response type networkInspectResponse struct { // in:body - Body types.Network + Body entities.NetworkInspectReport } // Network list diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index 194a8d3918..5a13530b90 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -7,6 +7,7 @@ import ( dockerContainer "github.com/docker/docker/api/types/container" dockerNetwork "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/system" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -16,6 +17,8 @@ type AuthConfig struct { type ImageInspect struct { docker.ImageInspect + // Container is for backwards compat but is basically unused + Container string } type ContainerConfig struct { @@ -74,7 +77,7 @@ type UpdateEntities struct { } type Info struct { - docker.Info + system.Info BuildahVersion string CPURealtimePeriod bool CPURealtimeRuntime bool diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go index 2f97bd7168..f659aa06cc 100644 --- a/pkg/api/handlers/utils/errors.go +++ b/pkg/api/handlers/utils/errors.go @@ -34,23 +34,25 @@ func Error(w http.ResponseWriter, code int, err error) { } func VolumeNotFound(w http.ResponseWriter, name string, err error) { - if !errors.Is(err, define.ErrNoSuchVolume) { - InternalServerError(w, err) + if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeExists) { + Error(w, http.StatusNotFound, err) + return } - Error(w, http.StatusNotFound, err) + InternalServerError(w, err) } func ContainerNotFound(w http.ResponseWriter, name string, err error) { if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrExists) { Error(w, http.StatusNotFound, err) - } else { - InternalServerError(w, err) + return } + InternalServerError(w, err) } func ImageNotFound(w http.ResponseWriter, name string, err error) { if !errors.Is(err, 
storage.ErrImageUnknown) { InternalServerError(w, err) + return } Error(w, http.StatusNotFound, err) } @@ -58,6 +60,7 @@ func ImageNotFound(w http.ResponseWriter, name string, err error) { func NetworkNotFound(w http.ResponseWriter, name string, err error) { if !errors.Is(err, define.ErrNoSuchNetwork) { InternalServerError(w, err) + return } Error(w, http.StatusNotFound, err) } @@ -65,6 +68,7 @@ func NetworkNotFound(w http.ResponseWriter, name string, err error) { func PodNotFound(w http.ResponseWriter, name string, err error) { if !errors.Is(err, define.ErrNoSuchPod) { InternalServerError(w, err) + return } Error(w, http.StatusNotFound, err) } @@ -72,6 +76,7 @@ func PodNotFound(w http.ResponseWriter, name string, err error) { func SessionNotFound(w http.ResponseWriter, name string, err error) { if !errors.Is(err, define.ErrNoSuchExecSession) { InternalServerError(w, err) + return } Error(w, http.StatusNotFound, err) } @@ -79,6 +84,7 @@ func SessionNotFound(w http.ResponseWriter, name string, err error) { func SecretNotFound(w http.ResponseWriter, nameOrID string, err error) { if errorhandling.Cause(err).Error() != "no such secret" { InternalServerError(w, err) + return } Error(w, http.StatusNotFound, err) } diff --git a/pkg/api/server/doc.go b/pkg/api/server/doc.go index 518a585827..b7f31fffa9 100644 --- a/pkg/api/server/doc.go +++ b/pkg/api/server/doc.go @@ -27,15 +27,15 @@ // // 'podman info' // -// curl --unix-socket /run/podman/podman.sock http://d/v4.0.0/libpod/info +// curl --unix-socket /run/podman/podman.sock http://d/v5.0.0/libpod/info // // 'podman pull quay.io/containers/podman' // -// curl -XPOST --unix-socket /run/podman/podman.sock -v 'http://d/v4.0.0/images/create?fromImage=quay.io%2Fcontainers%2Fpodman' +// curl -XPOST --unix-socket /run/podman/podman.sock -v 'http://d/v5.0.0/images/create?fromImage=quay.io%2Fcontainers%2Fpodman' // // 'podman list images' // -// curl --unix-socket /run/podman/podman.sock -v 'http://d/v4.0.0/libpod/images/json' | jq +// curl --unix-socket /run/podman/podman.sock -v 'http://d/v5.0.0/libpod/images/json' | jq // // Terms Of Service: // @@ -44,7 +44,7 @@ // Schemes: http, https // Host: podman.io // BasePath: / -// Version: 4.0.0 +// Version: 5.0.0 // License: Apache-2.0 https://opensource.org/licenses/Apache-2.0 // Contact: Podman https://podman.io/community/ // diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go index 998a70e1dc..b3a603af95 100644 --- a/pkg/api/server/register_containers.go +++ b/pkg/api/server/register_containers.go @@ -675,6 +675,35 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // $ref: "#/responses/internalError" r.HandleFunc(VersionedPath("/containers/{name}/rename"), s.APIHandler(compat.RenameContainer)).Methods(http.MethodPost) r.HandleFunc("/containers/{name}/rename", s.APIHandler(compat.RenameContainer)).Methods(http.MethodPost) + // swagger:operation POST /containers/{name}/update compat ContainerUpdate + // --- + // tags: + // - containers (compat) + // summary: Update configuration of an existing container + // description: Change configuration settings for an existing container without requiring recreation. 
+ // parameters: + // - in: path + // name: name + // type: string + // required: true + // description: Full or partial ID or full name of the container to rename + // - in: body + // name: resources + // required: false + // description: attributes for updating the container + // schema: + // $ref: "#/definitions/containerUpdateRequest" + // produces: + // - application/json + // responses: + // 200: + // description: no error + // 404: + // $ref: "#/responses/containerNotFound" + // 500: + // $ref: "#/responses/internalError" + r.HandleFunc(VersionedPath("/containers/{name}/update"), s.APIHandler(compat.UpdateContainer)).Methods(http.MethodPost) + r.HandleFunc("/containers/{name}/update", s.APIHandler(compat.UpdateContainer)).Methods(http.MethodPost) /* libpod endpoints @@ -1755,8 +1784,18 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // type: string // required: true // description: Full or partial ID or full name of the container to update + // - in: query + // name: restartPolicy + // type: string + // required: false + // description: New restart policy for the container. + // - in: query + // name: restartRetries + // type: integer + // required: false + // description: New amount of retries for the container's restart policy. Only allowed if restartPolicy is set to on-failure // - in: body - // name: resources + // name: config // description: attributes for updating the container // schema: // $ref: "#/definitions/UpdateEntities" @@ -1766,6 +1805,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // responses: // 201: // $ref: "#/responses/containerUpdateResponse" + // 400: + // $ref: "#/responses/badParamError" // 404: // $ref: "#/responses/containerNotFound" // 500: diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index b04fbd52b4..c592eec228 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -267,11 +267,20 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // - in: query // name: compress // type: boolean - // description: use compression on image + // description: Use compression on image. // - in: query // name: destination // type: string - // description: destination name for the image being pushed + // description: Allows for pushing the image to a different destination than the image refers to. + // - in: query + // name: format + // type: string + // description: Manifest type (oci, v2s1, or v2s2) to use when pushing an image. Default is manifest type of source, with fallbacks. + // - in: query + // name: tlsVerify + // description: Require TLS verification. + // type: boolean + // default: true // - in: header // name: X-Registry-Auth // type: string @@ -658,6 +667,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // default: // description: | // Platform format os[/arch[/variant]] + // Can be comma separated list for multi arch builds. // (As of version 1.xx) // - in: query // name: target @@ -731,15 +741,43 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // type: boolean // default: false // - in: query + // name: compressionFormat + // type: string + // description: Compression format used to compress image layers. + // - in: query + // name: compressionLevel + // type: integer + // description: Compression level used to compress image layers. + // - in: query // name: tlsVerify // description: Require TLS verification. 
// type: boolean // default: true // - in: query // name: quiet - // description: "silences extra stream data on push" + // description: Silences extra stream data on push. // type: boolean // default: true + // - in: query + // name: format + // type: string + // description: Manifest type (oci, v2s1, or v2s2) to use when pushing an image. Default is manifest type of source, with fallbacks. + // - in: query + // name: all + // type: boolean + // description: All indicates whether to push all images related to the image list. + // - in: query + // name: removeSignatures + // type: boolean + // description: Discard any pre-existing signatures in the image. + // - in: query + // name: retry + // type: integer + // description: Number of times to retry push in case of failure. + // - in: query + // name: retryDelay + // type: string + // description: Delay between retries in case of push failures. Duration format such as "412ms", or "3.5h". // - in: header // name: X-Registry-Auth // type: string diff --git a/pkg/api/server/register_manifest.go b/pkg/api/server/register_manifest.go index fb77e8e262..a7bd4efc11 100644 --- a/pkg/api/server/register_manifest.go +++ b/pkg/api/server/register_manifest.go @@ -103,14 +103,14 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/internalError" v4.Handle("/{name:.*}/registry/{destination:.*}", s.APIHandler(libpod.ManifestPush)).Methods(http.MethodPost) - // swagger:operation POST /libpod/manifests manifests ManifestCreateLibpod + // swagger:operation POST /libpod/manifests/{name} manifests ManifestCreateLibpod // --- // summary: Create // description: Create a manifest list // produces: // - application/json // parameters: - // - in: query + // - in: path // name: name // type: string // description: manifest list or index name to create diff --git a/pkg/api/server/register_system.go b/pkg/api/server/register_system.go index 2034c2a7b6..4010dfb0e0 100644 --- a/pkg/api/server/register_system.go +++ b/pkg/api/server/register_system.go @@ -25,6 +25,39 @@ func (s *APIServer) registerSystemHandlers(r *mux.Router) error { r.Handle(VersionedPath("/system/df"), s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet) // Added non version path to URI to support docker non versioned paths r.Handle("/system/df", s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet) + // swagger:operation POST /libpod/system/check libpod SystemCheckLibpod + // --- + // tags: + // - system + // summary: Performs consistency checks on storage, optionally removing items which fail checks + // parameters: + // - in: query + // name: quick + // type: boolean + // description: Skip time-consuming checks + // - in: query + // name: repair + // type: boolean + // description: Remove inconsistent images + // - in: query + // name: repair_lossy + // type: boolean + // description: Remove inconsistent containers and images + // - in: query + // name: unreferenced_layer_max_age + // type: string + // description: Maximum allowed age of unreferenced layers + // default: 24h0m0s + // produces: + // - application/json + // responses: + // 200: + // $ref: '#/responses/systemCheckResponse' + // 400: + // $ref: "#/responses/badParamError" + // 500: + // $ref: "#/responses/internalError" + r.Handle(VersionedPath("/libpod/system/check"), s.APIHandler(libpod.SystemCheck)).Methods(http.MethodPost) // swagger:operation POST /libpod/system/prune libpod SystemPruneLibpod // --- // tags: diff --git a/pkg/bindings/connection.go 
b/pkg/bindings/connection.go index d2c3aaba2a..496e469157 100644 --- a/pkg/bindings/connection.go +++ b/pkg/bindings/connection.go @@ -321,30 +321,35 @@ func (c *Connection) GetDialer(ctx context.Context) (net.Conn, error) { // IsInformational returns true if the response code is 1xx func (h *APIResponse) IsInformational() bool { + //nolint:usestdlibvars // linter wants to use http.StatusContinue over 100 but that makes less readable IMO return h.Response.StatusCode/100 == 1 } // IsSuccess returns true if the response code is 2xx func (h *APIResponse) IsSuccess() bool { + //nolint:usestdlibvars // linter wants to use http.StatusContinue over 100 but that makes less readable IMO return h.Response.StatusCode/100 == 2 } // IsRedirection returns true if the response code is 3xx func (h *APIResponse) IsRedirection() bool { + //nolint:usestdlibvars // linter wants to use http.StatusContinue over 100 but that makes less readable IMO return h.Response.StatusCode/100 == 3 } // IsClientError returns true if the response code is 4xx func (h *APIResponse) IsClientError() bool { + //nolint:usestdlibvars // linter wants to use http.StatusContinue over 100 but that makes less readable IMO return h.Response.StatusCode/100 == 4 } // IsConflictError returns true if the response code is 409 func (h *APIResponse) IsConflictError() bool { - return h.Response.StatusCode == 409 + return h.Response.StatusCode == http.StatusConflict } // IsServerError returns true if the response code is 5xx func (h *APIResponse) IsServerError() bool { + //nolint:usestdlibvars // linter wants to use http.StatusContinue over 100 but that makes less readable IMO return h.Response.StatusCode/100 == 5 } diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go index 19fc74f735..116ed91875 100644 --- a/pkg/bindings/containers/containers.go +++ b/pkg/bindings/containers/containers.go @@ -427,7 +427,7 @@ func Export(ctx context.Context, nameOrID string, w io.Writer, options *ExportOp } defer response.Body.Close() - if response.StatusCode/100 == 2 { + if response.IsSuccess() { _, err = io.Copy(w, response.Body) return err } diff --git a/pkg/bindings/containers/term_windows.go b/pkg/bindings/containers/term_windows.go index ba91ae2c5b..1be8f2d68f 100644 --- a/pkg/bindings/containers/term_windows.go +++ b/pkg/bindings/containers/term_windows.go @@ -61,7 +61,6 @@ func notifyWinChange(ctx context.Context, winChange chan os.Signal, stdin *os.Fi } } }() - } func getTermSize(stdin *os.File, stdout *os.File) (width, height int, err error) { diff --git a/pkg/bindings/containers/update.go b/pkg/bindings/containers/update.go index 20b743c954..37cf74426e 100644 --- a/pkg/bindings/containers/update.go +++ b/pkg/bindings/containers/update.go @@ -3,6 +3,8 @@ package containers import ( "context" "net/http" + "net/url" + "strconv" "strings" "github.com/containers/podman/v5/pkg/bindings" @@ -16,12 +18,20 @@ func Update(ctx context.Context, options *types.ContainerUpdateOptions) (string, return "", err } + params := url.Values{} + if options.Specgen.RestartPolicy != "" { + params.Set("restartPolicy", options.Specgen.RestartPolicy) + if options.Specgen.RestartRetries != nil { + params.Set("restartRetries", strconv.Itoa(int(*options.Specgen.RestartRetries))) + } + } + resources, err := jsoniter.MarshalToString(options.Specgen.ResourceLimits) if err != nil { return "", err } stringReader := strings.NewReader(resources) - response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/%s/update", nil, nil, 
options.NameOrID) + response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/%s/update", params, nil, options.NameOrID) if err != nil { return "", err } diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go index bb4f65e315..58b3c25af0 100644 --- a/pkg/bindings/images/build.go +++ b/pkg/bindings/images/build.go @@ -2,7 +2,6 @@ package images import ( "archive/tar" - "compress/gzip" "context" "encoding/json" "errors" @@ -31,6 +30,7 @@ import ( "github.com/docker/go-units" "github.com/hashicorp/go-multierror" jsoniter "github.com/json-iterator/go" + gzip "github.com/klauspost/pgzip" "github.com/sirupsen/logrus" ) @@ -483,7 +483,7 @@ func Build(ctx context.Context, containerFiles []string, options types.BuildOpti dontexcludes = append(dontexcludes, "!"+containerfile+".containerignore") } else { // If Containerfile does not exist, assume it is in context directory and do Not add to tarfile - if _, err := os.Lstat(containerfile); err != nil { + if err := fileutils.Lexists(containerfile); err != nil { if !os.IsNotExist(err) { return nil, err } diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go index 018edbc974..275e68a9ac 100644 --- a/pkg/bindings/images/images.go +++ b/pkg/bindings/images/images.go @@ -162,7 +162,7 @@ func Export(ctx context.Context, nameOrIDs []string, w io.Writer, options *Expor } defer response.Body.Close() - if response.StatusCode/100 == 2 || response.StatusCode/100 == 3 { + if response.IsSuccess() || response.IsRedirection() { _, err = io.Copy(w, response.Body) return err } diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go index 6c4cb07dbd..7621f8cd7d 100644 --- a/pkg/bindings/images/types.go +++ b/pkg/bindings/images/types.go @@ -162,6 +162,10 @@ type PushOptions struct { SkipTLSVerify *bool `schema:"-"` // RemoveSignatures Discard any pre-existing signatures in the image. RemoveSignatures *bool + // Retry number of times to retry push in case of failure + Retry *uint + // RetryDelay between retries in case of push failures + RetryDelay *string // Username for authenticating against the registry. Username *string `schema:"-"` // Quiet can be specified to suppress progress when pushing. 
diff --git a/pkg/bindings/images/types_push_options.go b/pkg/bindings/images/types_push_options.go index 2dcd382912..686636cdb2 100644 --- a/pkg/bindings/images/types_push_options.go +++ b/pkg/bindings/images/types_push_options.go @@ -198,6 +198,36 @@ func (o *PushOptions) GetRemoveSignatures() bool { return *o.RemoveSignatures } +// WithRetry set field Retry to given value +func (o *PushOptions) WithRetry(value uint) *PushOptions { + o.Retry = &value + return o +} + +// GetRetry returns value of field Retry +func (o *PushOptions) GetRetry() uint { + if o.Retry == nil { + var z uint + return z + } + return *o.Retry +} + +// WithRetryDelay set field RetryDelay to given value +func (o *PushOptions) WithRetryDelay(value string) *PushOptions { + o.RetryDelay = &value + return o +} + +// GetRetryDelay returns value of field RetryDelay +func (o *PushOptions) GetRetryDelay() string { + if o.RetryDelay == nil { + var z string + return z + } + return *o.RetryDelay +} + // WithUsername set field Username to given value func (o *PushOptions) WithUsername(value string) *PushOptions { o.Username = &value diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go index bc183042d6..9ac41a85eb 100644 --- a/pkg/bindings/manifests/manifests.go +++ b/pkg/bindings/manifests/manifests.go @@ -6,10 +6,15 @@ import ( "errors" "fmt" "io" + "mime/multipart" "net/http" + "net/textproto" "os" + "path/filepath" + "slices" "strconv" "strings" + "sync" "github.com/containers/common/libimage/define" "github.com/containers/image/v5/manifest" @@ -160,7 +165,7 @@ func Add(ctx context.Context, name string, options *AddOptions) (string, error) Features: options.Features, Images: options.Images, OS: options.OS, - OSFeatures: nil, + OSFeatures: options.OSFeatures, OSVersion: options.OSVersion, Variant: options.Variant, Username: options.Username, @@ -172,6 +177,37 @@ func Add(ctx context.Context, name string, options *AddOptions) (string, error) return Modify(ctx, name, options.Images, &optionsv4) } +// AddArtifact creates an artifact manifest and adds it to a given manifest +// list. Additional options for the manifest can also be specified. The ID of +// the new manifest list is returned as a string +func AddArtifact(ctx context.Context, name string, options *AddArtifactOptions) (string, error) { + if options == nil { + options = new(AddArtifactOptions) + } + optionsv4 := ModifyOptions{ + Annotations: options.Annotation, + Arch: options.Arch, + Features: options.Features, + OS: options.OS, + OSFeatures: options.OSFeatures, + OSVersion: options.OSVersion, + Variant: options.Variant, + + ArtifactType: options.Type, + ArtifactConfigType: options.ConfigType, + ArtifactLayerType: options.LayerType, + ArtifactConfig: options.Config, + ArtifactExcludeTitles: options.ExcludeTitles, + ArtifactSubject: options.Subject, + ArtifactAnnotations: options.Annotations, + } + if len(options.Files) > 0 { + optionsv4.WithArtifactFiles(options.Files) + } + optionsv4.WithOperation("update") + return Modify(ctx, name, nil, &optionsv4) +} + // Remove deletes a manifest entry from a manifest list. Both name and the digest to be // removed are mandatory inputs. The ID of the new manifest list is returned as a string. 
 func Remove(ctx context.Context, name, digest string, _ *RemoveOptions) (string, error) {
@@ -284,6 +320,16 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
 	}
 	options.WithImages(images)

+	var artifactFiles, artifactBaseNames []string
+	if options.ArtifactFiles != nil && len(*options.ArtifactFiles) > 0 {
+		artifactFiles = slices.Clone(*options.ArtifactFiles)
+		artifactBaseNames = make([]string, 0, len(artifactFiles))
+		for _, filename := range artifactFiles {
+			artifactBaseNames = append(artifactBaseNames, filepath.Base(filename))
+		}
+		options.ArtifactFiles = &artifactBaseNames
+	}
+
 	conn, err := bindings.GetClient(ctx)
 	if err != nil {
 		return "", err
@@ -292,12 +338,81 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
 	if err != nil {
 		return "", err
 	}
-	reader := strings.NewReader(opts)
+	reader := io.Reader(strings.NewReader(opts))
+	if options.Body != nil {
+		reader = io.MultiReader(reader, *options.Body)
+	}
+	var artifactContentType string
+	var artifactWriterGroup sync.WaitGroup
+	var artifactWriterError error
+	if len(artifactFiles) > 0 {
+		// get ready to upload the passed-in files
+		bodyReader, bodyWriter := io.Pipe()
+		defer bodyReader.Close()
+		requestBodyReader := reader
+		reader = bodyReader
+		// upload the files in another goroutine
+		writer := multipart.NewWriter(bodyWriter)
+		artifactContentType = writer.FormDataContentType()
+		artifactWriterGroup.Add(1)
+		go func() {
+			defer bodyWriter.Close()
+			defer writer.Close()
+			// start with the body we would have uploaded if we weren't
+			// attaching artifacts
+			headers := textproto.MIMEHeader{
+				"Content-Type": []string{"application/json"},
+			}
+			requestPartWriter, err := writer.CreatePart(headers)
+			if err != nil {
+				artifactWriterError = fmt.Errorf("creating form part for request: %v", err)
+				return
+			}
+			if _, err := io.Copy(requestPartWriter, requestBodyReader); err != nil {
+				artifactWriterError = fmt.Errorf("uploading request as form part: %v", err)
+				return
+			}
+			// now walk the list of files we're attaching
+			for _, file := range artifactFiles {
+				if err := func() error {
+					f, err := os.Open(file)
+					if err != nil {
+						return err
+					}
+					defer f.Close()
+					fileBase := filepath.Base(file)
+					formFile, err := writer.CreateFormFile(fileBase, fileBase)
+					if err != nil {
+						return err
+					}
+					st, err := f.Stat()
+					if err != nil {
+						return err
+					}
+					// upload the file contents
+					n, err := io.Copy(formFile, f)
+					if err != nil {
+						return fmt.Errorf("uploading contents of artifact file %s: %w", filepath.Base(file), err)
+					}
+					if n != st.Size() {
+						return fmt.Errorf("short write while uploading contents of artifact file %s: %d != %d", filepath.Base(file), n, st.Size())
+					}
+					return nil
+				}(); err != nil {
+					artifactWriterError = err
+					break
+				}
+			}
+		}()
+	}

 	header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword())
 	if err != nil {
 		return "", err
 	}
+	if artifactContentType != "" {
+		header["Content-Type"] = []string{artifactContentType}
+	}

 	params, err := options.ToParams()
 	if err != nil {
@@ -315,6 +430,11 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
 	}
 	defer response.Body.Close()

+	artifactWriterGroup.Wait()
+	if artifactWriterError != nil {
+		return "", fmt.Errorf("uploading artifacts: %w", artifactWriterError)
+	}
+
 	data, err := io.ReadAll(response.Body)
 	if err != nil {
 		return "", fmt.Errorf("unable to process API response: %w", err)
diff --git
a/pkg/bindings/manifests/types.go b/pkg/bindings/manifests/types.go index c9e14b1233..aae36c9a64 100644 --- a/pkg/bindings/manifests/types.go +++ b/pkg/bindings/manifests/types.go @@ -1,5 +1,7 @@ package manifests +import "io" + // InspectOptions are optional options for inspecting manifests // //go:generate go run ../generator/generator.go InspectOptions @@ -15,8 +17,9 @@ type InspectOptions struct { // //go:generate go run ../generator/generator.go CreateOptions type CreateOptions struct { - All *bool - Amend *bool + All *bool + Amend *bool + Annotation map[string]string } // ExistsOptions are optional options for checking @@ -30,20 +33,45 @@ type ExistsOptions struct { // //go:generate go run ../generator/generator.go AddOptions type AddOptions struct { - All *bool - Annotation map[string]string - Arch *string - Features []string + All *bool + + Annotation map[string]string + Arch *string + Features []string + OS *string + OSVersion *string + OSFeatures []string + Variant *string + Images []string - OS *string - OSVersion *string - Variant *string Authfile *string Password *string Username *string SkipTLSVerify *bool `schema:"-"` } +// AddArtifactOptions are optional options for adding artifact manifests +// +//go:generate go run ../generator/generator.go AddArtifactOptions +type AddArtifactOptions struct { + Annotation map[string]string + Arch *string + Features []string + OS *string + OSVersion *string + OSFeatures []string + Variant *string + + Type **string `json:"artifact_type,omitempty"` + ConfigType *string `json:"artifact_config_type,omitempty"` + Config *string `json:"artifact_config,omitempty"` + LayerType *string `json:"artifact_layer_type,omitempty"` + ExcludeTitles *bool `json:"artifact_exclude_titles,omitempty"` + Subject *string `json:"artifact_subject,omitempty"` + Annotations map[string]string `json:"artifact_annotations,omitempty"` + Files []string `json:"artifact_files,omitempty"` +} + // RemoveOptions are optional options for removing manifest lists // //go:generate go run ../generator/generator.go RemoveOptions @@ -55,21 +83,31 @@ type RemoveOptions struct { //go:generate go run ../generator/generator.go ModifyOptions type ModifyOptions struct { // Operation values are "update", "remove" and "annotate". This allows the service to - // efficiently perform each update on a manifest list. - Operation *string - All *bool // All when true, operate on all images in a manifest list that may be included in Images - Annotations map[string]string // Annotations to add to manifest list + // efficiently perform each update on a manifest list. 
+ Operation *string + All *bool // All when true, operate on all images in a manifest list that may be included in Images + + Annotations map[string]string // Annotations to add to the entries for Images in the manifest list Arch *string // Arch overrides the architecture for the image Features []string // Feature list for the image - Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation OS *string // OS overrides the operating system for the image - // OS features for the image - OSFeatures []string `json:"os_features" schema:"os_features"` - // OSVersion overrides the operating system for the image - OSVersion *string `json:"os_version" schema:"os_version"` - Variant *string // Variant overrides the operating system variant for the image + OSFeatures []string `json:"os_features" schema:"os_features"` // OSFeatures overrides the OS features for the image + OSVersion *string `json:"os_version" schema:"os_version"` // OSVersion overrides the operating system version for the image + Variant *string // Variant overrides the architecture variant for the image + + Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation Authfile *string Password *string Username *string SkipTLSVerify *bool `schema:"-"` + + ArtifactType **string `json:"artifact_type"` // the ArtifactType in an artifact manifest being created + ArtifactConfigType *string `json:"artifact_config_type"` // the config.MediaType in an artifact manifest being created + ArtifactConfig *string `json:"artifact_config"` // the config.Data in an artifact manifest being created + ArtifactLayerType *string `json:"artifact_layer_type"` // the MediaType for each layer in an artifact manifest being created + ArtifactExcludeTitles *bool `json:"artifact_exclude_titles"` // whether or not to include title annotations for each layer in an artifact manifest being created + ArtifactSubject *string `json:"artifact_subject"` // subject to set in an artifact manifest being created + ArtifactAnnotations map[string]string `json:"artifact_annotations"` // annotations to add to an artifact manifest being created + ArtifactFiles *[]string `json:"artifact_files"` // an optional list of files to add to a new artifact manifest in the manifest list + Body *io.Reader `json:"-" schema:"-"` } diff --git a/pkg/bindings/manifests/types_add_options.go b/pkg/bindings/manifests/types_add_options.go index cd1c2c7aa2..47892d7936 100644 --- a/pkg/bindings/manifests/types_add_options.go +++ b/pkg/bindings/manifests/types_add_options.go @@ -77,21 +77,6 @@ func (o *AddOptions) GetFeatures() []string { return o.Features } -// WithImages set field Images to given value -func (o *AddOptions) WithImages(value []string) *AddOptions { - o.Images = value - return o -} - -// GetImages returns value of field Images -func (o *AddOptions) GetImages() []string { - if o.Images == nil { - var z []string - return z - } - return o.Images -} - // WithOS set field OS to given value func (o *AddOptions) WithOS(value string) *AddOptions { o.OS = &value @@ -122,6 +107,21 @@ func (o *AddOptions) GetOSVersion() string { return *o.OSVersion } +// WithOSFeatures set field OSFeatures to given value +func (o *AddOptions) WithOSFeatures(value []string) *AddOptions { + o.OSFeatures = value + return o +} + +// GetOSFeatures returns value of field OSFeatures +func (o *AddOptions) GetOSFeatures() []string { + if o.OSFeatures == nil { + var z []string + return z + } + return o.OSFeatures +} + 
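Since OSFeatures is now forwarded by Add instead of being dropped, here is a brief sketch of how a bindings caller would use it; the connection context is assumed to come from bindings.NewConnection, and the image and feature values are placeholders rather than anything taken from this diff:

package manifestsketch

import (
	"context"

	"github.com/containers/podman/v5/pkg/bindings/manifests"
)

// addEntryWithOSFeatures adds one image to an existing manifest list and
// records OS feature strings for that entry via the new WithOSFeatures setter.
func addEntryWithOSFeatures(ctx context.Context, list, image string) (string, error) {
	// WithImages selects the entry to add; WithOSFeatures records the
	// os.features strings for that entry (example values only).
	opts := new(manifests.AddOptions).
		WithImages([]string{image}).
		WithOSFeatures([]string{"win32k"})
	return manifests.Add(ctx, list, opts)
}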
// WithVariant set field Variant to given value func (o *AddOptions) WithVariant(value string) *AddOptions { o.Variant = &value @@ -137,6 +137,21 @@ func (o *AddOptions) GetVariant() string { return *o.Variant } +// WithImages set field Images to given value +func (o *AddOptions) WithImages(value []string) *AddOptions { + o.Images = value + return o +} + +// GetImages returns value of field Images +func (o *AddOptions) GetImages() []string { + if o.Images == nil { + var z []string + return z + } + return o.Images +} + // WithAuthfile set field Authfile to given value func (o *AddOptions) WithAuthfile(value string) *AddOptions { o.Authfile = &value diff --git a/pkg/bindings/manifests/types_addartifact_options.go b/pkg/bindings/manifests/types_addartifact_options.go new file mode 100644 index 0000000000..3b1c2804ec --- /dev/null +++ b/pkg/bindings/manifests/types_addartifact_options.go @@ -0,0 +1,243 @@ +// Code generated by go generate; DO NOT EDIT. +package manifests + +import ( + "net/url" + + "github.com/containers/podman/v5/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *AddArtifactOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *AddArtifactOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAnnotation set field Annotation to given value +func (o *AddArtifactOptions) WithAnnotation(value map[string]string) *AddArtifactOptions { + o.Annotation = value + return o +} + +// GetAnnotation returns value of field Annotation +func (o *AddArtifactOptions) GetAnnotation() map[string]string { + if o.Annotation == nil { + var z map[string]string + return z + } + return o.Annotation +} + +// WithArch set field Arch to given value +func (o *AddArtifactOptions) WithArch(value string) *AddArtifactOptions { + o.Arch = &value + return o +} + +// GetArch returns value of field Arch +func (o *AddArtifactOptions) GetArch() string { + if o.Arch == nil { + var z string + return z + } + return *o.Arch +} + +// WithFeatures set field Features to given value +func (o *AddArtifactOptions) WithFeatures(value []string) *AddArtifactOptions { + o.Features = value + return o +} + +// GetFeatures returns value of field Features +func (o *AddArtifactOptions) GetFeatures() []string { + if o.Features == nil { + var z []string + return z + } + return o.Features +} + +// WithOS set field OS to given value +func (o *AddArtifactOptions) WithOS(value string) *AddArtifactOptions { + o.OS = &value + return o +} + +// GetOS returns value of field OS +func (o *AddArtifactOptions) GetOS() string { + if o.OS == nil { + var z string + return z + } + return *o.OS +} + +// WithOSVersion set field OSVersion to given value +func (o *AddArtifactOptions) WithOSVersion(value string) *AddArtifactOptions { + o.OSVersion = &value + return o +} + +// GetOSVersion returns value of field OSVersion +func (o *AddArtifactOptions) GetOSVersion() string { + if o.OSVersion == nil { + var z string + return z + } + return *o.OSVersion +} + +// WithOSFeatures set field OSFeatures to given value +func (o *AddArtifactOptions) WithOSFeatures(value []string) *AddArtifactOptions { + o.OSFeatures = value + return o +} + +// GetOSFeatures returns value of field OSFeatures +func (o *AddArtifactOptions) GetOSFeatures() []string { + if o.OSFeatures == nil { + var z []string + return z + } + return o.OSFeatures +} + +// WithVariant set field Variant to given value +func (o 
*AddArtifactOptions) WithVariant(value string) *AddArtifactOptions { + o.Variant = &value + return o +} + +// GetVariant returns value of field Variant +func (o *AddArtifactOptions) GetVariant() string { + if o.Variant == nil { + var z string + return z + } + return *o.Variant +} + +// WithType set field Type to given value +func (o *AddArtifactOptions) WithType(value *string) *AddArtifactOptions { + o.Type = &value + return o +} + +// GetType returns value of field Type +func (o *AddArtifactOptions) GetType() *string { + if o.Type == nil { + var z *string + return z + } + return *o.Type +} + +// WithConfigType set field ConfigType to given value +func (o *AddArtifactOptions) WithConfigType(value string) *AddArtifactOptions { + o.ConfigType = &value + return o +} + +// GetConfigType returns value of field ConfigType +func (o *AddArtifactOptions) GetConfigType() string { + if o.ConfigType == nil { + var z string + return z + } + return *o.ConfigType +} + +// WithConfig set field Config to given value +func (o *AddArtifactOptions) WithConfig(value string) *AddArtifactOptions { + o.Config = &value + return o +} + +// GetConfig returns value of field Config +func (o *AddArtifactOptions) GetConfig() string { + if o.Config == nil { + var z string + return z + } + return *o.Config +} + +// WithLayerType set field LayerType to given value +func (o *AddArtifactOptions) WithLayerType(value string) *AddArtifactOptions { + o.LayerType = &value + return o +} + +// GetLayerType returns value of field LayerType +func (o *AddArtifactOptions) GetLayerType() string { + if o.LayerType == nil { + var z string + return z + } + return *o.LayerType +} + +// WithExcludeTitles set field ExcludeTitles to given value +func (o *AddArtifactOptions) WithExcludeTitles(value bool) *AddArtifactOptions { + o.ExcludeTitles = &value + return o +} + +// GetExcludeTitles returns value of field ExcludeTitles +func (o *AddArtifactOptions) GetExcludeTitles() bool { + if o.ExcludeTitles == nil { + var z bool + return z + } + return *o.ExcludeTitles +} + +// WithSubject set field Subject to given value +func (o *AddArtifactOptions) WithSubject(value string) *AddArtifactOptions { + o.Subject = &value + return o +} + +// GetSubject returns value of field Subject +func (o *AddArtifactOptions) GetSubject() string { + if o.Subject == nil { + var z string + return z + } + return *o.Subject +} + +// WithAnnotations set field Annotations to given value +func (o *AddArtifactOptions) WithAnnotations(value map[string]string) *AddArtifactOptions { + o.Annotations = value + return o +} + +// GetAnnotations returns value of field Annotations +func (o *AddArtifactOptions) GetAnnotations() map[string]string { + if o.Annotations == nil { + var z map[string]string + return z + } + return o.Annotations +} + +// WithFiles set field Files to given value +func (o *AddArtifactOptions) WithFiles(value []string) *AddArtifactOptions { + o.Files = value + return o +} + +// GetFiles returns value of field Files +func (o *AddArtifactOptions) GetFiles() []string { + if o.Files == nil { + var z []string + return z + } + return o.Files +} diff --git a/pkg/bindings/manifests/types_create_options.go b/pkg/bindings/manifests/types_create_options.go index 65407928cc..758010716c 100644 --- a/pkg/bindings/manifests/types_create_options.go +++ b/pkg/bindings/manifests/types_create_options.go @@ -46,3 +46,18 @@ func (o *CreateOptions) GetAmend() bool { } return *o.Amend } + +// WithAnnotation set field Annotation to given value +func (o *CreateOptions) 
WithAnnotation(value map[string]string) *CreateOptions { + o.Annotation = value + return o +} + +// GetAnnotation returns value of field Annotation +func (o *CreateOptions) GetAnnotation() map[string]string { + if o.Annotation == nil { + var z map[string]string + return z + } + return o.Annotation +} diff --git a/pkg/bindings/manifests/types_modify_options.go b/pkg/bindings/manifests/types_modify_options.go index 866b658b4d..1957b8027a 100644 --- a/pkg/bindings/manifests/types_modify_options.go +++ b/pkg/bindings/manifests/types_modify_options.go @@ -2,6 +2,7 @@ package manifests import ( + "io" "net/url" "github.com/containers/podman/v5/pkg/bindings/internal/util" @@ -47,13 +48,13 @@ func (o *ModifyOptions) GetAll() bool { return *o.All } -// WithAnnotations set annotations to add to manifest list +// WithAnnotations set annotations to add to the entries for Images in the manifest list func (o *ModifyOptions) WithAnnotations(value map[string]string) *ModifyOptions { o.Annotations = value return o } -// GetAnnotations returns value of annotations to add to manifest list +// GetAnnotations returns value of annotations to add to the entries for Images in the manifest list func (o *ModifyOptions) GetAnnotations() map[string]string { if o.Annotations == nil { var z map[string]string @@ -92,21 +93,6 @@ func (o *ModifyOptions) GetFeatures() []string { return o.Features } -// WithImages set images is an optional list of images to add/remove to/from manifest list depending on operation -func (o *ModifyOptions) WithImages(value []string) *ModifyOptions { - o.Images = value - return o -} - -// GetImages returns value of images is an optional list of images to add/remove to/from manifest list depending on operation -func (o *ModifyOptions) GetImages() []string { - if o.Images == nil { - var z []string - return z - } - return o.Images -} - // WithOS set oS overrides the operating system for the image func (o *ModifyOptions) WithOS(value string) *ModifyOptions { o.OS = &value @@ -122,13 +108,13 @@ func (o *ModifyOptions) GetOS() string { return *o.OS } -// WithOSFeatures set field OSFeatures to given value +// WithOSFeatures set oSFeatures overrides the OS features for the image func (o *ModifyOptions) WithOSFeatures(value []string) *ModifyOptions { o.OSFeatures = value return o } -// GetOSFeatures returns value of field OSFeatures +// GetOSFeatures returns value of oSFeatures overrides the OS features for the image func (o *ModifyOptions) GetOSFeatures() []string { if o.OSFeatures == nil { var z []string @@ -137,13 +123,13 @@ func (o *ModifyOptions) GetOSFeatures() []string { return o.OSFeatures } -// WithOSVersion set field OSVersion to given value +// WithOSVersion set oSVersion overrides the operating system version for the image func (o *ModifyOptions) WithOSVersion(value string) *ModifyOptions { o.OSVersion = &value return o } -// GetOSVersion returns value of field OSVersion +// GetOSVersion returns value of oSVersion overrides the operating system version for the image func (o *ModifyOptions) GetOSVersion() string { if o.OSVersion == nil { var z string @@ -152,13 +138,13 @@ func (o *ModifyOptions) GetOSVersion() string { return *o.OSVersion } -// WithVariant set variant overrides the operating system variant for the image +// WithVariant set variant overrides the architecture variant for the image func (o *ModifyOptions) WithVariant(value string) *ModifyOptions { o.Variant = &value return o } -// GetVariant returns value of variant overrides the operating system variant for the image +// 
GetVariant returns value of variant overrides the architecture variant for the image func (o *ModifyOptions) GetVariant() string { if o.Variant == nil { var z string @@ -167,6 +153,21 @@ func (o *ModifyOptions) GetVariant() string { return *o.Variant } +// WithImages set images is an optional list of images to add/remove to/from manifest list depending on operation +func (o *ModifyOptions) WithImages(value []string) *ModifyOptions { + o.Images = value + return o +} + +// GetImages returns value of images is an optional list of images to add/remove to/from manifest list depending on operation +func (o *ModifyOptions) GetImages() []string { + if o.Images == nil { + var z []string + return z + } + return o.Images +} + // WithAuthfile set field Authfile to given value func (o *ModifyOptions) WithAuthfile(value string) *ModifyOptions { o.Authfile = &value @@ -226,3 +227,138 @@ func (o *ModifyOptions) GetSkipTLSVerify() bool { } return *o.SkipTLSVerify } + +// WithArtifactType set the ArtifactType in an artifact manifest being created +func (o *ModifyOptions) WithArtifactType(value *string) *ModifyOptions { + o.ArtifactType = &value + return o +} + +// GetArtifactType returns value of the ArtifactType in an artifact manifest being created +func (o *ModifyOptions) GetArtifactType() *string { + if o.ArtifactType == nil { + var z *string + return z + } + return *o.ArtifactType +} + +// WithArtifactConfigType set the config.MediaType in an artifact manifest being created +func (o *ModifyOptions) WithArtifactConfigType(value string) *ModifyOptions { + o.ArtifactConfigType = &value + return o +} + +// GetArtifactConfigType returns value of the config.MediaType in an artifact manifest being created +func (o *ModifyOptions) GetArtifactConfigType() string { + if o.ArtifactConfigType == nil { + var z string + return z + } + return *o.ArtifactConfigType +} + +// WithArtifactConfig set the config.Data in an artifact manifest being created +func (o *ModifyOptions) WithArtifactConfig(value string) *ModifyOptions { + o.ArtifactConfig = &value + return o +} + +// GetArtifactConfig returns value of the config.Data in an artifact manifest being created +func (o *ModifyOptions) GetArtifactConfig() string { + if o.ArtifactConfig == nil { + var z string + return z + } + return *o.ArtifactConfig +} + +// WithArtifactLayerType set the MediaType for each layer in an artifact manifest being created +func (o *ModifyOptions) WithArtifactLayerType(value string) *ModifyOptions { + o.ArtifactLayerType = &value + return o +} + +// GetArtifactLayerType returns value of the MediaType for each layer in an artifact manifest being created +func (o *ModifyOptions) GetArtifactLayerType() string { + if o.ArtifactLayerType == nil { + var z string + return z + } + return *o.ArtifactLayerType +} + +// WithArtifactExcludeTitles set whether or not to include title annotations for each layer in an artifact manifest being created +func (o *ModifyOptions) WithArtifactExcludeTitles(value bool) *ModifyOptions { + o.ArtifactExcludeTitles = &value + return o +} + +// GetArtifactExcludeTitles returns value of whether or not to include title annotations for each layer in an artifact manifest being created +func (o *ModifyOptions) GetArtifactExcludeTitles() bool { + if o.ArtifactExcludeTitles == nil { + var z bool + return z + } + return *o.ArtifactExcludeTitles +} + +// WithArtifactSubject set subject to set in an artifact manifest being created +func (o *ModifyOptions) WithArtifactSubject(value string) *ModifyOptions { + o.ArtifactSubject = 
&value + return o +} + +// GetArtifactSubject returns value of subject to set in an artifact manifest being created +func (o *ModifyOptions) GetArtifactSubject() string { + if o.ArtifactSubject == nil { + var z string + return z + } + return *o.ArtifactSubject +} + +// WithArtifactAnnotations set annotations to add to an artifact manifest being created +func (o *ModifyOptions) WithArtifactAnnotations(value map[string]string) *ModifyOptions { + o.ArtifactAnnotations = value + return o +} + +// GetArtifactAnnotations returns value of annotations to add to an artifact manifest being created +func (o *ModifyOptions) GetArtifactAnnotations() map[string]string { + if o.ArtifactAnnotations == nil { + var z map[string]string + return z + } + return o.ArtifactAnnotations +} + +// WithArtifactFiles set an optional list of files to add to a new artifact manifest in the manifest list +func (o *ModifyOptions) WithArtifactFiles(value []string) *ModifyOptions { + o.ArtifactFiles = &value + return o +} + +// GetArtifactFiles returns value of an optional list of files to add to a new artifact manifest in the manifest list +func (o *ModifyOptions) GetArtifactFiles() []string { + if o.ArtifactFiles == nil { + var z []string + return z + } + return *o.ArtifactFiles +} + +// WithBody set field Body to given value +func (o *ModifyOptions) WithBody(value io.Reader) *ModifyOptions { + o.Body = &value + return o +} + +// GetBody returns value of field Body +func (o *ModifyOptions) GetBody() io.Reader { + if o.Body == nil { + var z io.Reader + return z + } + return *o.Body +} diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go index 0ff425313c..a0512d5ec4 100644 --- a/pkg/bindings/network/network.go +++ b/pkg/bindings/network/network.go @@ -70,8 +70,8 @@ func Update(ctx context.Context, netNameOrID string, options *UpdateOptions) err } // Inspect returns information about a network configuration -func Inspect(ctx context.Context, nameOrID string, _ *InspectOptions) (types.Network, error) { - var net types.Network +func Inspect(ctx context.Context, nameOrID string, _ *InspectOptions) (entitiesTypes.NetworkInspectReport, error) { + var net entitiesTypes.NetworkInspectReport conn, err := bindings.GetClient(ctx) if err != nil { return net, err diff --git a/pkg/bindings/pods/pods.go b/pkg/bindings/pods/pods.go index 88ceeca5e0..5cc78acc69 100644 --- a/pkg/bindings/pods/pods.go +++ b/pkg/bindings/pods/pods.go @@ -14,9 +14,7 @@ import ( ) func CreatePodFromSpec(ctx context.Context, spec *entitiesTypes.PodSpec) (*entitiesTypes.PodCreateReport, error) { - var ( - pcr entitiesTypes.PodCreateReport - ) + var pcr entitiesTypes.PodCreateReport if spec == nil { spec = new(entitiesTypes.PodSpec) } @@ -55,9 +53,7 @@ func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, // Inspect returns low-level information about the given pod. func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entitiesTypes.PodInspectReport, error) { - var ( - report entitiesTypes.PodInspectReport - ) + var report entitiesTypes.PodInspectReport if options == nil { options = new(InspectOptions) } @@ -78,9 +74,7 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*en // Kill sends a SIGTERM to all the containers in a pod. The optional signal parameter // can be used to override SIGTERM. 
func Kill(ctx context.Context, nameOrID string, options *KillOptions) (*entitiesTypes.PodKillReport, error) { - var ( - report entitiesTypes.PodKillReport - ) + var report entitiesTypes.PodKillReport if options == nil { options = new(KillOptions) } @@ -145,9 +139,7 @@ func Prune(ctx context.Context, options *PruneOptions) ([]*entitiesTypes.PodPrun // List returns all pods in local storage. The optional filters parameter can // be used to refine which pods should be listed. func List(ctx context.Context, options *ListOptions) ([]*entitiesTypes.ListPodsReport, error) { - var ( - podsReports []*entitiesTypes.ListPodsReport - ) + var podsReports []*entitiesTypes.ListPodsReport if options == nil { options = new(ListOptions) } @@ -231,6 +223,7 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) (*entiti if response.StatusCode == http.StatusNotModified { report.Id = nameOrID + report.RawInput = nameOrID return &report, nil } diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go index e97ebc7b66..a41bfb1f8e 100644 --- a/pkg/bindings/system/system.go +++ b/pkg/bindings/system/system.go @@ -42,6 +42,10 @@ func Events(ctx context.Context, eventChan chan types.Event, cancelChan chan boo }() } + if response.StatusCode != http.StatusOK { + return response.Process(nil) + } + dec := json.NewDecoder(response.Body) for err = (error)(nil); err == nil; { var e = types.Event{} @@ -83,6 +87,26 @@ func Prune(ctx context.Context, options *PruneOptions) (*types.SystemPruneReport return &report, response.Process(&report) } +func Check(ctx context.Context, options *CheckOptions) (*types.SystemCheckReport, error) { + var report types.SystemCheckReport + + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/system/check", params, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &report, response.Process(&report) +} + func Version(ctx context.Context, options *VersionOptions) (*types.SystemVersionReport, error) { var ( component types.SystemComponentVersion diff --git a/pkg/bindings/system/types.go b/pkg/bindings/system/types.go index 89e093f688..2342f7c497 100644 --- a/pkg/bindings/system/types.go +++ b/pkg/bindings/system/types.go @@ -38,3 +38,13 @@ type DiskOptions struct { //go:generate go run ../generator/generator.go InfoOptions type InfoOptions struct { } + +// CheckOptions are optional options for storage consistency check/repair +// +//go:generate go run ../generator/generator.go CheckOptions +type CheckOptions struct { + Quick *bool `schema:"quick"` + Repair *bool `schema:"repair"` + RepairLossy *bool `schema:"repair_lossy"` + UnreferencedLayerMaximumAge *string `schema:"unreferenced_layer_max_age"` +} diff --git a/pkg/bindings/system/types_check_options.go b/pkg/bindings/system/types_check_options.go new file mode 100644 index 0000000000..374f142d80 --- /dev/null +++ b/pkg/bindings/system/types_check_options.go @@ -0,0 +1,78 @@ +// Code generated by go generate; DO NOT EDIT. 
+package system + +import ( + "net/url" + + "github.com/containers/podman/v5/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CheckOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CheckOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithQuick set field Quick to given value +func (o *CheckOptions) WithQuick(value bool) *CheckOptions { + o.Quick = &value + return o +} + +// GetQuick returns value of field Quick +func (o *CheckOptions) GetQuick() bool { + if o.Quick == nil { + var z bool + return z + } + return *o.Quick +} + +// WithRepair set field Repair to given value +func (o *CheckOptions) WithRepair(value bool) *CheckOptions { + o.Repair = &value + return o +} + +// GetRepair returns value of field Repair +func (o *CheckOptions) GetRepair() bool { + if o.Repair == nil { + var z bool + return z + } + return *o.Repair +} + +// WithRepairLossy set field RepairLossy to given value +func (o *CheckOptions) WithRepairLossy(value bool) *CheckOptions { + o.RepairLossy = &value + return o +} + +// GetRepairLossy returns value of field RepairLossy +func (o *CheckOptions) GetRepairLossy() bool { + if o.RepairLossy == nil { + var z bool + return z + } + return *o.RepairLossy +} + +// WithUnreferencedLayerMaximumAge set field UnreferencedLayerMaximumAge to given value +func (o *CheckOptions) WithUnreferencedLayerMaximumAge(value string) *CheckOptions { + o.UnreferencedLayerMaximumAge = &value + return o +} + +// GetUnreferencedLayerMaximumAge returns value of field UnreferencedLayerMaximumAge +func (o *CheckOptions) GetUnreferencedLayerMaximumAge() string { + if o.UnreferencedLayerMaximumAge == nil { + var z string + return z + } + return *o.UnreferencedLayerMaximumAge +} diff --git a/pkg/bindings/test/networks_test.go b/pkg/bindings/test/networks_test.go index c7b61aee28..194bfaf40a 100644 --- a/pkg/bindings/test/networks_test.go +++ b/pkg/bindings/test/networks_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "time" "github.com/containers/common/libnetwork/types" @@ -13,7 +14,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" - "golang.org/x/exp/slices" ) var _ = Describe("Podman networks", func() { diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index 9db33af529..f621379114 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -3,6 +3,7 @@ package bindings_test import ( "fmt" "net/http" + "slices" "strings" "time" @@ -16,7 +17,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" - "golang.org/x/exp/slices" ) var _ = Describe("Podman pods", func() { diff --git a/pkg/bindings/test/volumes_test.go b/pkg/bindings/test/volumes_test.go index 7fc64178b4..ce9a8ff92f 100644 --- a/pkg/bindings/test/volumes_test.go +++ b/pkg/bindings/test/volumes_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "time" "github.com/containers/podman/v5/pkg/bindings" @@ -14,7 +15,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gexec" - "golang.org/x/exp/slices" ) var _ = Describe("Podman volumes", func() { diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go index e80e1cf1f3..3fae15764c 100644 --- a/pkg/checkpoint/checkpoint_restore.go +++ b/pkg/checkpoint/checkpoint_restore.go @@ -102,17 +102,18 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt if !crutils.CRRuntimeSupportsPodCheckpointRestore(runtime.GetOCIRuntimePath()) { return nil, fmt.Errorf("runtime %s does not support pod restore", runtime.GetOCIRuntimePath()) } - // Restoring into an existing Pod - ctrConfig.Pod = restoreOptions.Pod // According to podman pod create a pod can share the following namespaces: // cgroup, ipc, net, pid, uts // Let's make sure we are restoring into a pod with the same shared namespaces. - pod, err := runtime.LookupPod(ctrConfig.Pod) + pod, err := runtime.LookupPod(restoreOptions.Pod) if err != nil { return nil, fmt.Errorf("pod %q cannot be retrieved: %w", ctrConfig.Pod, err) } + // Restoring into an existing Pod + ctrConfig.Pod = pod.ID() + infraContainer, err := pod.InfraContainer() if err != nil { return nil, fmt.Errorf("cannot retrieve infra container from pod %q: %w", ctrConfig.Pod, err) diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go index 07f3b769ff..3736ea1e2e 100644 --- a/pkg/checkpoint/crutils/checkpoint_restore_utils.go +++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go @@ -222,7 +222,7 @@ func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool { return false } -// CRRuntimeSupportsCheckpointRestore tests if the runtime at 'runtimePath' +// CRRuntimeSupportsPodCheckpointRestore tests if the runtime at 'runtimePath' // supports restoring into existing Pods. The runtime needs to support // the CRIU option --lsm-mount-context and the existence of this is checked // by this function. In addition it is necessary to at least have CRIU 3.16. 
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go index 1a2fcefd03..7fcbc64953 100644 --- a/pkg/domain/entities/engine.go +++ b/pkg/domain/entities/engine.go @@ -48,6 +48,7 @@ type PodmanConfig struct { Trace bool // Hidden: Trace execution URI string // URI to RESTful API Service FarmNodeName string // Name of farm node + ConnectionError error // Error when looking up the connection in setupRemoteConnection() Runroot string ImageStore string @@ -57,4 +58,5 @@ type PodmanConfig struct { MachineMode bool TransientStore bool GraphRoot string + PullOptions []string } diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go index 5c9c53517f..0e798be4e8 100644 --- a/pkg/domain/entities/engine_container.go +++ b/pkg/domain/entities/engine_container.go @@ -70,7 +70,7 @@ type ContainerEngine interface { //nolint:interfacebloat NetworkUpdate(ctx context.Context, networkname string, options NetworkUpdateOptions) error NetworkDisconnect(ctx context.Context, networkname string, options NetworkDisconnectOptions) error NetworkExists(ctx context.Context, networkname string) (*BoolReport, error) - NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]netTypes.Network, []error, error) + NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]NetworkInspectReport, []error, error) NetworkList(ctx context.Context, options NetworkListOptions) ([]netTypes.Network, error) NetworkPrune(ctx context.Context, options NetworkPruneOptions) ([]*NetworkPruneReport, error) NetworkReload(ctx context.Context, names []string, options NetworkReloadOptions) ([]*NetworkReloadReport, error) @@ -95,7 +95,7 @@ type ContainerEngine interface { //nolint:interfacebloat PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error) Renumber(ctx context.Context) error Reset(ctx context.Context) error - SetupRootless(ctx context.Context, noMoveProcess bool) error + SetupRootless(ctx context.Context, noMoveProcess bool, cgroupMode string) error SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error) SecretInspect(ctx context.Context, nameOrIDs []string, options SecretInspectOptions) ([]*SecretInfoReport, []error, error) SecretList(ctx context.Context, opts SecretListRequest) ([]*SecretInfoReport, error) @@ -103,6 +103,7 @@ type ContainerEngine interface { //nolint:interfacebloat SecretExists(ctx context.Context, nameOrID string) (*BoolReport, error) Shutdown(ctx context.Context) SystemDf(ctx context.Context, options SystemDfOptions) (*SystemDfReport, error) + SystemCheck(ctx context.Context, options SystemCheckOptions) (*SystemCheckReport, error) Unshare(ctx context.Context, args []string, options SystemUnshareOptions) error Version(ctx context.Context) (*SystemVersionReport, error) VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IDOrNameResponse, error) diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go index 09941a39da..8179b9a67f 100644 --- a/pkg/domain/entities/engine_image.go +++ b/pkg/domain/entities/engine_image.go @@ -36,6 +36,7 @@ type ImageEngine interface { //nolint:interfacebloat ManifestExists(ctx context.Context, name string) (*BoolReport, error) ManifestInspect(ctx context.Context, name string, opts ManifestInspectOptions) ([]byte, error) ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, 
error) + ManifestAddArtifact(ctx context.Context, name string, files []string, opts ManifestAddArtifactOptions) (string, error) ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error) diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go index f81f54bf2f..d9c7625533 100644 --- a/pkg/domain/entities/events.go +++ b/pkg/domain/entities/events.go @@ -33,11 +33,13 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { name := e.Actor.Attributes["name"] network := e.Actor.Attributes["network"] podID := e.Actor.Attributes["podId"] + errorString := e.Actor.Attributes["error"] details := e.Actor.Attributes delete(details, "image") delete(details, "name") delete(details, "network") delete(details, "podId") + delete(details, "error") delete(details, "containerExitCode") return &libpodEvents.Event{ ContainerExitCode: &exitCode, @@ -49,6 +51,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { Time: time.Unix(0, e.TimeNano), Type: t, HealthStatus: e.HealthStatus, + Error: errorString, Details: libpodEvents.Details{ PodID: podID, Attributes: details, @@ -71,6 +74,9 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *types.Event { if e.Network != "" { attributes["network"] = e.Network } + if e.Error != "" { + attributes["error"] = e.Error + } message := dockerEvents.Message{ // Compatibility with clients that still look for deprecated API elements Status: e.Status.String(), diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index 96ed74617e..9b3a40eb71 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -51,7 +51,6 @@ func (i *Image) Id() string { //nolint:revive,stylecheck return i.ID } -// swagger:model LibpodImageSummary type ImageSummary = entitiesTypes.ImageSummary // ImageRemoveOptions can be used to alter image removal. @@ -151,6 +150,10 @@ type ImagePushOptions struct { // RemoveSignatures, discard any pre-existing signatures in the image. // Ignored for remote calls. RemoveSignatures bool + // Retry number of times to retry push in case of failure + Retry *uint + // RetryDelay between retries in case of push failures + RetryDelay string // SignaturePolicy to use when pulling. Ignored for remote calls. 
SignaturePolicy string // Signers, if non-empty, asks for signatures to be added during the copy @@ -238,8 +241,11 @@ type ImageSearchReport = entitiesTypes.ImageSearchReport // Image List Options type ImageListOptions struct { - All bool `json:"all" schema:"all"` - Filter []string `json:"Filter,omitempty"` + All bool + // ExtendedAttributes is used by the libpod endpoint only to deliver extra information + // that the compat endpoint does not + ExtendedAttributes bool + Filter []string } type ImagePruneOptions struct { diff --git a/pkg/domain/entities/manifest.go b/pkg/domain/entities/manifest.go index 303995903b..a81e76c076 100644 --- a/pkg/domain/entities/manifest.go +++ b/pkg/domain/entities/manifest.go @@ -5,7 +5,7 @@ import ( entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" ) -// ManifestCreateOptions provides model for creating manifest +// ManifestCreateOptions provides model for creating manifest list or image index type ManifestCreateOptions struct { // True when adding lists to include all images All bool `schema:"all"` @@ -13,6 +13,8 @@ type ManifestCreateOptions struct { Amend bool `schema:"amend"` // Should TLS registry certificate be verified? SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + // Annotations to set on the list, which forces it to be OCI format + Annotations map[string]string `json:"annotations" schema:"annotations"` } // ManifestInspectOptions provides model for inspecting manifest @@ -40,28 +42,51 @@ type ManifestAddOptions struct { SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` // Username to authenticate to registry when pushing manifest list Username string `json:"-" schema:"-"` - // Images is an optional list of images to add to manifest list + // Images is an optional list of image references to add to manifest list Images []string `json:"images" schema:"images"` } +// ManifestAddArtifactOptions provides the model for creating artifact manifests +// for files and adding those manifests to a manifest list +// +// swagger:model +type ManifestAddArtifactOptions struct { + ManifestAnnotateOptions + // Note to future maintainers: keep these fields synchronized with ManifestModifyOptions! 
+ Type *string `json:"artifact_type" schema:"artifact_type"` + LayerType string `json:"artifact_layer_type" schema:"artifact_layer_type"` + ConfigType string `json:"artifact_config_type" schema:"artifact_config_type"` + Config string `json:"artifact_config" schema:"artifact_config"` + ExcludeTitles bool `json:"artifact_exclude_titles" schema:"artifact_exclude_titles"` + Annotations map[string]string `json:"artifact_annotations" schema:"artifact_annotations"` + Subject string `json:"artifact_subject" schema:"artifact_subject"` + Files []string `json:"artifact_files" schema:"-"` +} + // ManifestAnnotateOptions provides model for annotating manifest list type ManifestAnnotateOptions struct { - // Annotation to add to manifest list + // Annotation to add to the item in the manifest list Annotation []string `json:"annotation" schema:"annotation"` - // Annotations to add to manifest list by a map which is preferred over Annotation + // Annotations to add to the item in the manifest list by a map which is preferred over Annotation Annotations map[string]string `json:"annotations" schema:"annotations"` - // Arch overrides the architecture for the image + // Arch overrides the architecture for the item in the manifest list Arch string `json:"arch" schema:"arch"` - // Feature list for the image + // Feature list for the item in the manifest list Features []string `json:"features" schema:"features"` - // OS overrides the operating system for the image + // OS overrides the operating system for the item in the manifest list OS string `json:"os" schema:"os"` - // OS features for the image + // OS features for the item in the manifest list OSFeatures []string `json:"os_features" schema:"os_features"` - // OSVersion overrides the operating system for the image + // OSVersion overrides the operating system for the item in the manifest list OSVersion string `json:"os_version" schema:"os_version"` - // Variant for the image + // Variant for the item in the manifest list Variant string `json:"variant" schema:"variant"` + // IndexAnnotation is a slice of key=value annotations to add to the manifest list itself + IndexAnnotation []string `json:"index_annotation" schema:"annotation"` + // IndexAnnotations is a map of key:value annotations to add to the manifest list itself, by a map which is preferred over IndexAnnotation + IndexAnnotations map[string]string `json:"index_annotations" schema:"annotations"` + // IndexSubject is a subject value to set in the manifest list itself + IndexSubject string `json:"subject" schema:"subject"` } // ManifestModifyOptions provides the model for mutating a manifest @@ -77,11 +102,21 @@ type ManifestModifyOptions struct { Operation string `json:"operation" schema:"operation"` // Valid values: update, remove, annotate ManifestAddOptions ManifestRemoveOptions + // The following are all of the fields from ManifestAddArtifactOptions. + // We can't just embed the whole structure because it embeds a + // ManifestAnnotateOptions, which would conflict with the one that + // ManifestAddOptions embeds. 
+ ArtifactType *string `json:"artifact_type" schema:"artifact_type"` + ArtifactLayerType string `json:"artifact_layer_type" schema:"artifact_layer_type"` + ArtifactConfigType string `json:"artifact_config_type" schema:"artifact_config_type"` + ArtifactConfig string `json:"artifact_config" schema:"artifact_config"` + ArtifactExcludeTitles bool `json:"artifact_exclude_titles" schema:"artifact_exclude_titles"` + ArtifactAnnotations map[string]string `json:"artifact_annotations" schema:"artifact_annotations"` + ArtifactSubject string `json:"artifact_subject" schema:"artifact_subject"` + ArtifactFiles []string `json:"artifact_files" schema:"-"` } // ManifestPushReport provides the model for the pushed manifest -// -// swagger:model type ManifestPushReport = entitiesTypes.ManifestPushReport // ManifestRemoveOptions provides the model for removing digests from a manifest @@ -91,11 +126,7 @@ type ManifestRemoveOptions struct { } // ManifestRemoveReport provides the model for the removed manifest -// -// swagger:model type ManifestRemoveReport = entitiesTypes.ManifestRemoveReport // ManifestModifyReport provides the model for removed digests and changed manifest -// -// swagger:model type ManifestModifyReport = entitiesTypes.ManifestModifyReport diff --git a/pkg/domain/entities/network.go b/pkg/domain/entities/network.go index c2e223233c..74b6af5c83 100644 --- a/pkg/domain/entities/network.go +++ b/pkg/domain/entities/network.go @@ -75,10 +75,12 @@ type NetworkConnectOptions = entitiesTypes.NetworkConnectOptions // NetworkPruneReport containers the name of network and an error // associated in its pruning (removal) -// swagger:model NetworkPruneReport type NetworkPruneReport = entitiesTypes.NetworkPruneReport // NetworkPruneOptions describes options for pruning unused networks type NetworkPruneOptions struct { Filters map[string][]string } + +type NetworkInspectReport = entitiesTypes.NetworkInspectReport +type NetworkContainerInfo = entitiesTypes.NetworkContainerInfo diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go index 51167a1183..63b88e3355 100644 --- a/pkg/domain/entities/pods.go +++ b/pkg/domain/entities/pods.go @@ -216,6 +216,8 @@ type ContainerCreateOptions struct { Restart string Replace bool Requires []string + Retry *uint `json:"retry,omitempty"` + RetryDelay string `json:"retry_delay,omitempty"` Rm bool RootFS bool Secrets []string diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go index 5d11f0a326..ab9c861a02 100644 --- a/pkg/domain/entities/system.go +++ b/pkg/domain/entities/system.go @@ -9,6 +9,8 @@ type ServiceOptions = types.ServiceOptions type SystemPruneOptions = types.SystemPruneOptions type SystemPruneReport = types.SystemPruneReport type SystemMigrateOptions = types.SystemMigrateOptions +type SystemCheckOptions = types.SystemCheckOptions +type SystemCheckReport = types.SystemCheckReport type SystemDfOptions = types.SystemDfOptions type SystemDfReport = types.SystemDfReport type SystemDfImageReport = types.SystemDfImageReport @@ -19,7 +21,6 @@ type SystemUnshareOptions = types.SystemUnshareOptions type ComponentVersion = types.SystemComponentVersion type ListRegistriesReport = types.ListRegistriesReport -// swagger:model AuthConfig type AuthConfig = types.AuthConfig type AuthReport = types.AuthReport type LocksReport = types.LocksReport diff --git a/pkg/domain/entities/types/auth.go b/pkg/domain/entities/types/auth.go index 7f2480173f..3fa66987a1 100644 --- a/pkg/domain/entities/types/auth.go +++ b/pkg/domain/entities/types/auth.go 
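Editor's aside: stepping back from the raw hunks for a moment, the artifact plumbing introduced above (ManifestAddArtifactOptions and the new ImageEngine.ManifestAddArtifact method) is meant to be used roughly as in the sketch below. The helper name, artifact media type, and file name are made up for the example; only the option fields and the interface method come from this change.

package main

import (
	"context"

	"github.com/containers/podman/v5/pkg/domain/entities"
)

// addSBOMArtifact is a hypothetical helper: it wraps one file in an artifact
// manifest and attaches that manifest to the named manifest list.
func addSBOMArtifact(ctx context.Context, ir entities.ImageEngine, listName string) (string, error) {
	artifactType := "application/spdx+json" // assumed media type for the example
	opts := entities.ManifestAddArtifactOptions{
		Type:        &artifactType,
		Annotations: map[string]string{"org.example.generator": "demo"},
	}
	// Files may be passed positionally or via opts.Files; the ABI
	// implementation merges the two slices before calling AddArtifact.
	return ir.ManifestAddArtifact(ctx, listName, []string{"sbom.spdx.json"}, opts)
}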
@@ -2,6 +2,7 @@ package types // AuthConfig contains authorization information for connecting to a Registry +// swagger:model type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` diff --git a/pkg/domain/entities/types/container_ps.go b/pkg/domain/entities/types/container_ps.go index 73f70dbe4c..139a87c036 100644 --- a/pkg/domain/entities/types/container_ps.go +++ b/pkg/domain/entities/types/container_ps.go @@ -25,6 +25,11 @@ type ListContainer struct { ExitedAt int64 // If container has exited, the return code from the command ExitCode int32 + // ExposedPorts contains the ports that are exposed but not forwarded, + // see Ports for forwarded ports. + // The key is the port number and the string slice contains the protocols, + // i.e. "tcp", "udp" and "sctp". + ExposedPorts map[uint16][]string // The unique identifier for the container ID string `json:"Id"` // Container image diff --git a/pkg/domain/entities/types/images.go b/pkg/domain/entities/types/images.go index 1a63d5d809..48344cfa9f 100644 --- a/pkg/domain/entities/types/images.go +++ b/pkg/domain/entities/types/images.go @@ -23,9 +23,15 @@ type ImageSummary struct { Dangling bool `json:",omitempty"` // Podman extensions - Names []string `json:",omitempty"` + Arch string `json:",omitempty"` Digest string `json:",omitempty"` History []string `json:",omitempty"` + // IsManifestList is a ptr so we can distinguish between a true + // json empty response and false. the docker compat side needs to return + // empty; where as the libpod side needs a value of true or false + IsManifestList *bool `json:",omitempty"` + Names []string `json:",omitempty"` + Os string `json:",omitempty"` } func (i *ImageSummary) Id() string { //nolint:revive,stylecheck diff --git a/pkg/domain/entities/types/manifest.go b/pkg/domain/entities/types/manifest.go index 941c3265ef..493950bc72 100644 --- a/pkg/domain/entities/types/manifest.go +++ b/pkg/domain/entities/types/manifest.go @@ -14,8 +14,10 @@ type ManifestPushReport struct { type ManifestModifyReport struct { // Manifest List ID ID string `json:"Id"` - // Images to removed from manifest list, otherwise not provided. + // Images added to or removed from manifest list, otherwise not provided. Images []string `json:"images,omitempty" schema:"images"` + // Files added to manifest list, otherwise not provided. 
+ Files []string `json:"files,omitempty" schema:"files"` // Errors associated with operation Errors []error `json:"errors,omitempty"` } diff --git a/pkg/domain/entities/types/network.go b/pkg/domain/entities/types/network.go index 8c547ee94d..ae233b8232 100644 --- a/pkg/domain/entities/types/network.go +++ b/pkg/domain/entities/types/network.go @@ -35,3 +35,17 @@ type NetworkRmReport struct { type NetworkCreateReport struct { Name string } + +type NetworkInspectReport struct { + commonTypes.Network + + Containers map[string]NetworkContainerInfo `json:"containers"` +} + +type NetworkContainerInfo struct { + // Name of the container + Name string `json:"name"` + + // Interfaces configured for this container with their addresses + Interfaces map[string]commonTypes.NetInterface `json:"interfaces,omitempty"` +} diff --git a/pkg/domain/entities/types/pods.go b/pkg/domain/entities/types/pods.go index 4a42558006..eaaef1501b 100644 --- a/pkg/domain/entities/types/pods.go +++ b/pkg/domain/entities/types/pods.go @@ -22,8 +22,9 @@ type PodUnpauseReport struct { } type PodStopReport struct { - Errs []error - Id string //nolint:revive,stylecheck + Errs []error + Id string //nolint:revive,stylecheck + RawInput string } type PodRestartReport struct { @@ -32,8 +33,9 @@ type PodRestartReport struct { } type PodStartReport struct { - Errs []error - Id string //nolint:revive,stylecheck + Errs []error + Id string //nolint:revive,stylecheck + RawInput string } type PodRmReport struct { diff --git a/pkg/domain/entities/types/system.go b/pkg/domain/entities/types/system.go index 3d1361ef01..6c331cd50e 100644 --- a/pkg/domain/entities/types/system.go +++ b/pkg/domain/entities/types/system.go @@ -15,6 +15,28 @@ type ServiceOptions struct { URI string // Path to unix domain socket service should listen on } +// SystemCheckOptions provides options for checking storage consistency. +type SystemCheckOptions struct { + Quick bool // skip the most time-intensive checks + Repair bool // remove damaged images + RepairLossy bool // remove damaged containers + UnreferencedLayerMaximumAge *time.Duration // maximum allowed age for unreferenced layers +} + +// SystemCheckReport provides a report of what a storage consistency check +// found, and if we removed anything that was damaged, what we removed. +type SystemCheckReport struct { + Errors bool // any errors were detected + Layers map[string][]string // layer ID → what was detected + ROLayers map[string][]string // layer ID → what was detected + RemovedLayers []string // layer ID + Images map[string][]string // image ID → what was detected + ROImages map[string][]string // image ID → what was detected + RemovedImages map[string][]string // image ID → names + Containers map[string][]string // container ID → what was detected + RemovedContainers map[string]string // container ID → name +} + // SystemPruneOptions provides options to prune system. 
type SystemPruneOptions struct { All bool diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go index a0acd3a7f0..050e28ce0c 100644 --- a/pkg/domain/entities/volumes.go +++ b/pkg/domain/entities/volumes.go @@ -7,7 +7,6 @@ import ( ) // VolumeCreateOptions provides details for creating volumes -// swagger:model type VolumeCreateOptions = types.VolumeCreateOptions type VolumeConfigResponse = types.VolumeConfigResponse diff --git a/pkg/domain/filters/containers.go b/pkg/domain/filters/containers.go index d69b46fa32..b4a683631c 100644 --- a/pkg/domain/filters/containers.go +++ b/pkg/domain/filters/containers.go @@ -5,6 +5,7 @@ package filters import ( "errors" "fmt" + "slices" "strconv" "strings" "time" @@ -13,7 +14,6 @@ import ( "github.com/containers/common/pkg/util" "github.com/containers/podman/v5/libpod" "github.com/containers/podman/v5/libpod/define" - "golang.org/x/exp/slices" ) // GenerateContainerFilterFuncs return ContainerFilter functions based of filter. diff --git a/pkg/domain/filters/pods.go b/pkg/domain/filters/pods.go index 3529f01276..c009028868 100644 --- a/pkg/domain/filters/pods.go +++ b/pkg/domain/filters/pods.go @@ -5,6 +5,7 @@ package filters import ( "errors" "fmt" + "slices" "strconv" "strings" @@ -12,7 +13,6 @@ import ( "github.com/containers/common/pkg/util" "github.com/containers/podman/v5/libpod" "github.com/containers/podman/v5/libpod/define" - "golang.org/x/exp/slices" ) // GeneratePodFilterFunc takes a filter and filtervalue (key, value) diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 9016398bb5..4f0fd63094 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -995,7 +995,10 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri return reports, fmt.Errorf("unable to start container %s: %w", ctr.ID(), err) } - exitCode = ic.GetContainerExitCode(ctx, ctr.Container) + exitCode, err2 := ic.ContainerWaitForExitCode(ctx, ctr.Container) + if err2 != nil { + logrus.Errorf("Waiting for container %s: %v", ctr.ID(), err2) + } reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), RawInput: ctr.rawInput, @@ -1189,7 +1192,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta report.ExitCode = define.ExitCode(err) return &report, err } - report.ExitCode = ic.GetContainerExitCode(ctx, ctr) + report.ExitCode, _ = ic.ContainerWaitForExitCode(ctx, ctr) if opts.Rm && !ctr.ShouldRestart(ctx) { if err := removeContainer(ctr, false); err != nil { if errors.Is(err, define.ErrNoSuchCtr) || @@ -1203,14 +1206,13 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta return &report, nil } -func (ic *ContainerEngine) GetContainerExitCode(ctx context.Context, ctr *libpod.Container) int { +func (ic *ContainerEngine) ContainerWaitForExitCode(ctx context.Context, ctr *libpod.Container) (int, error) { exitCode, err := ctr.Wait(ctx) if err != nil { - logrus.Errorf("Waiting for container %s: %v", ctr.ID(), err) intExitCode := int(define.ExecErrorCodeNotFound) - return intExitCode + return intExitCode, err } - return int(exitCode) + return int(exitCode), nil } func (ic *ContainerEngine) ContainerLogs(ctx context.Context, namesOrIds []string, options entities.ContainerLogsOptions) error { @@ -1569,12 +1571,7 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri go func() { defer close(statsChan) - var ( - err error - containers []*libpod.Container - 
containerStats map[string]*define.ContainerStats - ) - containerStats = make(map[string]*define.ContainerStats) + containerStats := make(map[string]*define.ContainerStats) stream: // label to flatten the scope select { @@ -1588,7 +1585,7 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri // Anonymous func to easily use the return values for streaming. computeStats := func() ([]define.ContainerStats, error) { - containers, err = containerFunc() + containers, err := containerFunc() if err != nil { return nil, fmt.Errorf("unable to get list of containers: %w", err) } @@ -1767,7 +1764,12 @@ func (ic *ContainerEngine) ContainerUpdate(ctx context.Context, updateOptions *e return "", fmt.Errorf("container not found") } - if err = containers[0].Update(updateOptions.Specgen.ResourceLimits); err != nil { + var restartPolicy *string + if updateOptions.Specgen.RestartPolicy != "" { + restartPolicy = &updateOptions.Specgen.RestartPolicy + } + + if err = containers[0].Update(updateOptions.Specgen.ResourceLimits, restartPolicy, updateOptions.Specgen.RestartRetries); err != nil { return "", err } return containers[0].ID(), nil diff --git a/pkg/domain/infra/abi/containers_runlabel.go b/pkg/domain/infra/abi/containers_runlabel.go index 2ee64441fc..2a4119996f 100644 --- a/pkg/domain/infra/abi/containers_runlabel.go +++ b/pkg/domain/infra/abi/containers_runlabel.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "path/filepath" "strings" @@ -14,6 +15,7 @@ import ( "github.com/containers/podman/v5/pkg/domain/entities" envLib "github.com/containers/podman/v5/pkg/env" "github.com/containers/podman/v5/utils" + "github.com/containers/storage/pkg/fileutils" "github.com/google/shlex" "github.com/sirupsen/logrus" ) @@ -171,6 +173,13 @@ func generateRunlabelCommand(runlabel string, img *libimage.Image, inputName str return "" } return d + case "HOME": + h, err := os.UserHomeDir() + if err != nil { + logrus.Warnf("Unable to determine user's home directory: %s", err) + return "" + } + return h } return "" } @@ -276,7 +285,7 @@ func substituteCommand(cmd string) (string, error) { if err != nil { return "", err } - if _, err := os.Stat(res); !os.IsNotExist(err) { + if err := fileutils.Exists(res); !errors.Is(err, fs.ErrNotExist) { return res, nil } else if err != nil { return "", err diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go index e48e52d446..e424500fbd 100644 --- a/pkg/domain/infra/abi/generate.go +++ b/pkg/domain/infra/abi/generate.go @@ -207,7 +207,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, // Generate the kube pods from containers. 
if len(ctrs) >= 1 { - po, err := libpod.GenerateForKube(ctx, ctrs, options.Service, options.UseLongAnnotations, options.PodmanOnly) + po, err := libpod.GenerateForKube(ctx, ctrs, options.Service, options.PodmanOnly) if err != nil { return nil, err } @@ -283,7 +283,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener svcs := [][]byte{} for _, p := range pods { - po, sp, err := p.GenerateForKube(ctx, options.Service, options.UseLongAnnotations, options.PodmanOnly) + po, sp, err := p.GenerateForKube(ctx, options.Service, options.PodmanOnly) if err != nil { return nil, nil, err } diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index be574f090c..f7d5356aed 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -254,8 +254,8 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify pullOptions.Writer = options.Writer pullOptions.OciDecryptConfig = options.OciDecryptConfig - pullOptions.MaxRetries = options.Retry + if options.RetryDelay != "" { duration, err := time.ParseDuration(options.RetryDelay) if err != nil { @@ -343,6 +343,14 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri pushOptions.OciEncryptLayers = options.OciEncryptLayers pushOptions.CompressionLevel = options.CompressionLevel pushOptions.ForceCompressionFormat = options.ForceCompressionFormat + pushOptions.MaxRetries = options.Retry + if options.RetryDelay != "" { + duration, err := time.ParseDuration(options.RetryDelay) + if err != nil { + return nil, err + } + pushOptions.RetryDelay = &duration + } compressionFormat := options.CompressionFormat if compressionFormat == "" { diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go index 2dafc8218b..6f6d575711 100644 --- a/pkg/domain/infra/abi/images_list.go +++ b/pkg/domain/infra/abi/images_list.go @@ -3,11 +3,11 @@ package abi import ( "context" "fmt" + "slices" "github.com/containers/common/libimage" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" - "golang.org/x/exp/slices" ) func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) { @@ -57,6 +57,21 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) RepoTags: img.Names(), // may include tags and digests ParentId: parentID, } + if opts.ExtendedAttributes { + iml, err := img.IsManifestList(ctx) + if err != nil { + return nil, err + } + s.IsManifestList = &iml + if !iml { + imgData, err := img.Inspect(ctx, nil) + if err != nil { + return nil, err + } + s.Arch = imgData.Architecture + s.Os = imgData.Os + } + } s.Labels, err = img.Labels(ctx) if err != nil { return nil, fmt.Errorf("retrieving label for image %q: you may need to remove the image to resolve the error: %w", img.ID(), err) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/pkg/domain/infra/abi/internal/expansion/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE rename to pkg/domain/infra/abi/internal/expansion/LICENSE diff --git a/pkg/domain/infra/abi/internal/expansion/README.md b/pkg/domain/infra/abi/internal/expansion/README.md new file mode 100644 index 0000000000..23a7a3d142 --- /dev/null +++ b/pkg/domain/infra/abi/internal/expansion/README.md @@ -0,0 +1,5 @@ +Copied from 
https://github.com/kubernetes/kubernetes/tree/master/third_party/forked/golang/expansion . + +This is to eliminate a direct dependency on `k8s.io/kubernetes`. + +Ref: https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280 diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go b/pkg/domain/infra/abi/internal/expansion/expand.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go rename to pkg/domain/infra/abi/internal/expansion/expand.go diff --git a/pkg/domain/infra/abi/internal/expansion/expand_test.go b/pkg/domain/infra/abi/internal/expansion/expand_test.go new file mode 100644 index 0000000000..45f32edfc6 --- /dev/null +++ b/pkg/domain/infra/abi/internal/expansion/expand_test.go @@ -0,0 +1,287 @@ +package expansion + +import ( + "testing" +) + +func TestMapReference(t *testing.T) { + type envVar struct { + Name string + Value string + } + envs := []envVar{ + { + Name: "FOO", + Value: "bar", + }, + { + Name: "ZOO", + Value: "$(FOO)-1", + }, + { + Name: "BLU", + Value: "$(ZOO)-2", + }, + } + + declaredEnv := map[string]string{ + "FOO": "bar", + "ZOO": "$(FOO)-1", + "BLU": "$(ZOO)-2", + } + + serviceEnv := map[string]string{} + + mapping := MappingFuncFor(declaredEnv, serviceEnv) + + for _, env := range envs { + declaredEnv[env.Name] = Expand(env.Value, mapping) + } + + expectedEnv := map[string]string{ + "FOO": "bar", + "ZOO": "bar-1", + "BLU": "bar-1-2", + } + + for k, v := range expectedEnv { + if e, a := v, declaredEnv[k]; e != a { + t.Errorf("Expected %v, got %v", e, a) + } else { + delete(declaredEnv, k) + } + } + + if len(declaredEnv) != 0 { + t.Errorf("Unexpected keys in declared env: %v", declaredEnv) + } +} + +func TestMapping(t *testing.T) { + context := map[string]string{ + "VAR_A": "A", + "VAR_B": "B", + "VAR_C": "C", + "VAR_REF": "$(VAR_A)", + "VAR_EMPTY": "", + } + mapping := MappingFuncFor(context) + + doExpansionTest(t, mapping) +} + +func TestMappingDual(t *testing.T) { + context := map[string]string{ + "VAR_A": "A", + "VAR_EMPTY": "", + } + context2 := map[string]string{ + "VAR_B": "B", + "VAR_C": "C", + "VAR_REF": "$(VAR_A)", + } + mapping := MappingFuncFor(context, context2) + + doExpansionTest(t, mapping) +} + +func doExpansionTest(t *testing.T, mapping func(string) string) { + cases := []struct { + name string + input string + expected string + }{ + { + name: "whole string", + input: "$(VAR_A)", + expected: "A", + }, + { + name: "repeat", + input: "$(VAR_A)-$(VAR_A)", + expected: "A-A", + }, + { + name: "beginning", + input: "$(VAR_A)-1", + expected: "A-1", + }, + { + name: "middle", + input: "___$(VAR_B)___", + expected: "___B___", + }, + { + name: "end", + input: "___$(VAR_C)", + expected: "___C", + }, + { + name: "compound", + input: "$(VAR_A)_$(VAR_B)_$(VAR_C)", + expected: "A_B_C", + }, + { + name: "escape & expand", + input: "$$(VAR_B)_$(VAR_A)", + expected: "$(VAR_B)_A", + }, + { + name: "compound escape", + input: "$$(VAR_A)_$$(VAR_B)", + expected: "$(VAR_A)_$(VAR_B)", + }, + { + name: "mixed in escapes", + input: "f000-$$VAR_A", + expected: "f000-$VAR_A", + }, + { + name: "backslash escape ignored", + input: "foo\\$(VAR_C)bar", + expected: "foo\\Cbar", + }, + { + name: "backslash escape ignored", + input: "foo\\\\$(VAR_C)bar", + expected: "foo\\\\Cbar", + }, + { + name: "lots of backslashes", + input: "foo\\\\\\\\$(VAR_A)bar", + expected: "foo\\\\\\\\Abar", + }, + { + name: "nested var references", + input: "$(VAR_A$(VAR_B))", + expected: 
"$(VAR_A$(VAR_B))", + }, + { + name: "nested var references second type", + input: "$(VAR_A$(VAR_B)", + expected: "$(VAR_A$(VAR_B)", + }, + { + name: "value is a reference", + input: "$(VAR_REF)", + expected: "$(VAR_A)", + }, + { + name: "value is a reference x 2", + input: "%%$(VAR_REF)--$(VAR_REF)%%", + expected: "%%$(VAR_A)--$(VAR_A)%%", + }, + { + name: "empty var", + input: "foo$(VAR_EMPTY)bar", + expected: "foobar", + }, + { + name: "unterminated expression", + input: "foo$(VAR_Awhoops!", + expected: "foo$(VAR_Awhoops!", + }, + { + name: "expression without operator", + input: "f00__(VAR_A)__", + expected: "f00__(VAR_A)__", + }, + { + name: "shell special vars pass through", + input: "$?_boo_$!", + expected: "$?_boo_$!", + }, + { + name: "bare operators are ignored", + input: "$VAR_A", + expected: "$VAR_A", + }, + { + name: "undefined vars are passed through", + input: "$(VAR_DNE)", + expected: "$(VAR_DNE)", + }, + { + name: "multiple (even) operators, var undefined", + input: "$$$$$$(BIG_MONEY)", + expected: "$$$(BIG_MONEY)", + }, + { + name: "multiple (even) operators, var defined", + input: "$$$$$$(VAR_A)", + expected: "$$$(VAR_A)", + }, + { + name: "multiple (odd) operators, var undefined", + input: "$$$$$$$(GOOD_ODDS)", + expected: "$$$$(GOOD_ODDS)", + }, + { + name: "multiple (odd) operators, var defined", + input: "$$$$$$$(VAR_A)", + expected: "$$$A", + }, + { + name: "missing open expression", + input: "$VAR_A)", + expected: "$VAR_A)", + }, + { + name: "shell syntax ignored", + input: "${VAR_A}", + expected: "${VAR_A}", + }, + { + name: "trailing incomplete expression not consumed", + input: "$(VAR_B)_______$(A", + expected: "B_______$(A", + }, + { + name: "trailing incomplete expression, no content, is not consumed", + input: "$(VAR_C)_______$(", + expected: "C_______$(", + }, + { + name: "operator at end of input string is preserved", + input: "$(VAR_A)foobarzab$", + expected: "Afoobarzab$", + }, + { + name: "shell escaped incomplete expr", + input: "foo-\\$(VAR_A", + expected: "foo-\\$(VAR_A", + }, + { + name: "lots of $( in middle", + input: "--$($($($($--", + expected: "--$($($($($--", + }, + { + name: "lots of $( in beginning", + input: "$($($($($--foo$(", + expected: "$($($($($--foo$(", + }, + { + name: "lots of $( at end", + input: "foo0--$($($($(", + expected: "foo0--$($($($(", + }, + { + name: "escaped operators in variable names are not escaped", + input: "$(foo$$var)", + expected: "$(foo$$var)", + }, + { + name: "newline not expanded", + input: "\n", + expected: "\n", + }, + } + + for _, tc := range cases { + expanded := Expand(tc.input, mapping) + if e, a := tc.expected, expanded; e != a { + t.Errorf("%v: expected %q, got %q", tc.name, e, a) + } + } +} diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go index 65037ce5cc..144740ec88 100644 --- a/pkg/domain/infra/abi/manifest.go +++ b/pkg/domain/infra/abi/manifest.go @@ -4,14 +4,16 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "os" + "path" + "slices" "strings" - "errors" - "github.com/containers/common/libimage" cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/pkg/shortnames" @@ -45,6 +47,14 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images [ } } + annotateOptions := &libimage.ManifestListAnnotateOptions{} + if len(opts.Annotations) != 0 { + annotateOptions.IndexAnnotations = 
opts.Annotations + if err := manifestList.AnnotateInstance("", annotateOptions); err != nil { + return "", err + } + } + addOptions := &libimage.ManifestListAddOptions{All: opts.All} for _, image := range images { if _, err := manifestList.Add(ctx, image, addOptions); err != nil { @@ -214,6 +224,13 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st Password: opts.Password, } + images = slices.Clone(images) + for _, image := range opts.Images { + if !slices.Contains(images, image) { + images = append(images, image) + } + } + for _, image := range images { instanceDigest, err := manifestList.Add(ctx, image, addOptions) if err != nil { @@ -226,6 +243,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st OS: opts.OS, OSVersion: opts.OSVersion, Variant: opts.Variant, + Subject: opts.IndexSubject, } if len(opts.Annotation) != 0 { annotations := make(map[string]string) @@ -247,11 +265,99 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st return manifestList.ID(), nil } +func mergeAnnotations(preferred map[string]string, aux []string) (map[string]string, error) { + if len(aux) != 0 { + auxAnnotations := make(map[string]string) + for _, annotationSpec := range aux { + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return nil, fmt.Errorf("no value given for annotation %q", key) + } + auxAnnotations[key] = val + } + if preferred == nil { + preferred = make(map[string]string) + } + preferred = envLib.Join(auxAnnotations, preferred) + } + return preferred, nil +} + // ManifestAnnotate updates an entry of the manifest list func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, opts entities.ManifestAnnotateOptions) (string, error) { - instanceDigest, err := digest.Parse(image) + manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name) if err != nil { - return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err) + return "", err + } + + annotateOptions := &libimage.ManifestListAnnotateOptions{ + Architecture: opts.Arch, + Features: opts.Features, + OS: opts.OS, + OSVersion: opts.OSVersion, + Variant: opts.Variant, + Subject: opts.IndexSubject, + } + if annotateOptions.Annotations, err = mergeAnnotations(opts.Annotations, opts.Annotation); err != nil { + return "", err + } + if annotateOptions.IndexAnnotations, err = mergeAnnotations(opts.IndexAnnotations, opts.IndexAnnotation); err != nil { + return "", err + } + + var instanceDigest digest.Digest + if image == "" { + if len(opts.Annotations) != 0 { + return "", errors.New("setting annotation on an item in a manifest list requires an instance digest") + } + if len(opts.Annotation) != 0 { + return "", errors.New("setting annotation on an item in a manifest list requires an instance digest") + } + if opts.Arch != "" { + return "", errors.New("setting architecture on an item in a manifest list requires an instance digest") + } + if len(opts.Features) != 0 { + return "", errors.New("setting features on an item in a manifest list requires an instance digest") + } + if opts.OS != "" { + return "", errors.New("setting OS on an item in a manifest list requires an instance digest") + } + if len(opts.OSFeatures) != 0 { + return "", errors.New("setting OS features on an item in a manifest list requires an instance digest") + } + if opts.OSVersion != "" { + return "", errors.New("setting OS version on an item in a manifest list requires an instance digest") + } + if opts.Variant != "" { + return 
"", errors.New("setting variant on an item in a manifest list requires an instance digest") + } + } else { + if len(opts.IndexAnnotations) != 0 { + return "", errors.New("setting index-wide annotation in a manifest list requires no instance digest") + } + if len(opts.IndexAnnotation) != 0 { + return "", errors.New("setting index-wide annotation in a manifest list requires no instance digest") + } + if len(opts.IndexSubject) != 0 { + return "", errors.New("setting subject for a manifest list requires no instance digest") + } + instanceDigest, err = ir.digestFromDigestOrManifestListMember(ctx, manifestList, image) + if err != nil { + return "", fmt.Errorf("finding instance for %q: %w", image, err) + } + } + + if err := manifestList.AnnotateInstance(instanceDigest, annotateOptions); err != nil { + return "", err + } + + return manifestList.ID(), nil +} + +// ManifestAddArtifact creates artifact manifest for files and adds them to the manifest list +func (ir *ImageEngine) ManifestAddArtifact(ctx context.Context, name string, files []string, opts entities.ManifestAddArtifactOptions) (string, error) { + if len(files) < 1 { + return "", errors.New("manifest add artifact requires at least one file") } manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name) @@ -259,25 +365,42 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, return "", err } + files = slices.Clone(files) + for _, file := range opts.Files { + if !slices.Contains(files, file) { + files = append(files, file) + } + } + + addArtifactOptions := &libimage.ManifestListAddArtifactOptions{ + Type: opts.Type, + ConfigType: opts.ConfigType, + Config: opts.Config, + LayerType: opts.LayerType, + ExcludeTitles: opts.ExcludeTitles, + Annotations: opts.Annotations, + Subject: opts.Subject, + } + + instanceDigest, err := manifestList.AddArtifact(ctx, addArtifactOptions, files...) 
+ if err != nil { + return "", err + } + annotateOptions := &libimage.ManifestListAnnotateOptions{ Architecture: opts.Arch, Features: opts.Features, OS: opts.OS, OSVersion: opts.OSVersion, Variant: opts.Variant, + Subject: opts.IndexSubject, } - if len(opts.Annotation) != 0 { - annotations := make(map[string]string) - for _, annotationSpec := range opts.Annotation { - key, val, hasVal := strings.Cut(annotationSpec, "=") - if !hasVal { - return "", fmt.Errorf("no value given for annotation %q", key) - } - annotations[key] = val - } - opts.Annotations = envLib.Join(opts.Annotations, annotations) + if annotateOptions.Annotations, err = mergeAnnotations(opts.Annotations, opts.Annotation); err != nil { + return "", err + } + if annotateOptions.IndexAnnotations, err = mergeAnnotations(opts.IndexAnnotations, opts.IndexAnnotation); err != nil { + return "", err } - annotateOptions.Annotations = opts.Annotations if err := manifestList.AnnotateInstance(instanceDigest, annotateOptions); err != nil { return "", err @@ -286,6 +409,53 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, return manifestList.ID(), nil } +func (ir *ImageEngine) digestFromDigestOrManifestListMember(ctx context.Context, list *libimage.ManifestList, name string) (digest.Digest, error) { + instanceDigest, err := digest.Parse(name) + if err == nil { + return instanceDigest, nil + } + listData, inspectErr := list.Inspect() + if inspectErr != nil { + return "", fmt.Errorf(`inspecting list "%s" for instance list: %v`, list.ID(), err) + } + // maybe the name is a file name we previously attached as part of an artifact manifest + for _, descriptor := range listData.Manifests { + if slices.Contains(descriptor.Files, path.Base(name)) || slices.Contains(descriptor.Files, name) { + return descriptor.Digest, nil + } + } + // maybe it's the name of an image we added to the list? 
+ ref, err := alltransports.ParseImageName(name) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name) + ref, err = alltransports.ParseImageName(withDocker) + if err != nil { + image, _, err := ir.Libpod.LibimageRuntime().LookupImage(name, &libimage.LookupImageOptions{ManifestList: true}) + if err != nil { + return "", fmt.Errorf("locating image named %q to check if it's in the manifest list: %w", name, err) + } + if ref, err = image.StorageReference(); err != nil { + return "", fmt.Errorf("reading image reference %q to check if it's in the manifest list: %w", name, err) + } + } + } + // read the manifest of this image + src, err := ref.NewImageSource(ctx, ir.Libpod.SystemContext()) + if err != nil { + return "", fmt.Errorf("reading local image %q to check if it's in the manifest list: %w", name, err) + } + defer src.Close() + manifestBytes, _, err := src.GetManifest(ctx, nil) + if err != nil { + return "", fmt.Errorf("locating image named %q to check if it's in the manifest list: %w", name, err) + } + refDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return "", fmt.Errorf("digesting manifest of local image %q: %w", name, err) + } + return refDigest, nil +} + // ManifestRemoveDigest removes specified digest from the specified manifest list func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name, image string) (string, error) { instanceDigest, err := digest.Parse(image) diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go index c5309ed8d1..c39b9c600b 100644 --- a/pkg/domain/infra/abi/network.go +++ b/pkg/domain/infra/abi/network.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strconv" "github.com/containers/common/libnetwork/pasta" @@ -12,7 +13,6 @@ import ( netutil "github.com/containers/common/libnetwork/util" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/domain/entities" - "golang.org/x/exp/slices" ) func (ic *ContainerEngine) NetworkUpdate(ctx context.Context, netName string, options entities.NetworkUpdateOptions) error { @@ -64,9 +64,13 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.Net return nets, err } -func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]types.Network, []error, error) { +func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]entities.NetworkInspectReport, []error, error) { var errs []error - networks := make([]types.Network, 0, len(namesOrIds)) + statuses, err := ic.GetContainerNetStatuses() + if err != nil { + return nil, nil, fmt.Errorf("failed to get network status for containers: %w", err) + } + networks := make([]entities.NetworkInspectReport, 0, len(namesOrIds)) for _, name := range namesOrIds { net, err := ic.Libpod.Network().NetworkInspect(name) if err != nil { @@ -77,7 +81,22 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri return nil, nil, fmt.Errorf("inspecting network %s: %w", name, err) } } - networks = append(networks, net) + containerMap := make(map[string]entities.NetworkContainerInfo) + for _, st := range statuses { + // Make sure to only show the info for the correct network + if sb, ok := st.Status[net.Name]; ok { + containerMap[st.ID] = entities.NetworkContainerInfo{ + Name: st.Name, + Interfaces: sb.Interfaces, + } + } + } + + netReport := entities.NetworkInspectReport{ + Network: net, + Containers: 
containerMap, + } + networks = append(networks, netReport) } return networks, errs, nil } @@ -243,3 +262,36 @@ func (ic *ContainerEngine) createDanglingFilterFunc(wantDangling bool) (types.Fi return wantDangling }, nil } + +type ContainerNetStatus struct { + // Name of the container + Name string + // ID of the container + ID string + // Status contains the net status, the key is the network name + Status map[string]types.StatusBlock +} + +func (ic *ContainerEngine) GetContainerNetStatuses() ([]ContainerNetStatus, error) { + cons, err := ic.Libpod.GetAllContainers() + if err != nil { + return nil, err + } + statuses := make([]ContainerNetStatus, 0, len(cons)) + for _, con := range cons { + status, err := con.GetNetworkStatus() + if err != nil { + if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { + continue + } + return nil, err + } + + statuses = append(statuses, ContainerNetStatus{ + ID: con.ID(), + Name: con.Name(), + Status: status, + }) + } + return statuses, nil +} diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go index aa6b7013f7..ec5c1a1fc3 100644 --- a/pkg/domain/infra/abi/play.go +++ b/pkg/domain/infra/abi/play.go @@ -23,8 +23,10 @@ import ( "github.com/containers/podman/v5/cmd/podman/parse" "github.com/containers/podman/v5/libpod" "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/annotations" "github.com/containers/podman/v5/pkg/domain/entities" entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion" v1apps "github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1" v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" metav1 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,12 +37,12 @@ import ( "github.com/containers/podman/v5/pkg/systemd/notifyproxy" "github.com/containers/podman/v5/pkg/util" "github.com/containers/podman/v5/utils" + "github.com/containers/storage/pkg/fileutils" "github.com/coreos/go-systemd/v22/daemon" "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" yamlv3 "gopkg.in/yaml.v3" - "k8s.io/kubernetes/third_party/forked/golang/expansion" "sigs.k8s.io/yaml" ) @@ -124,6 +126,54 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri return ctr, nil } +func (ic *ContainerEngine) prepareAutomountImages(ctx context.Context, forContainer string, annotations map[string]string) ([]*specgen.ImageVolume, error) { + volMap := make(map[string]*specgen.ImageVolume) + + ctrAnnotation := define.KubeImageAutomountAnnotation + "/" + forContainer + + automount, ok := annotations[ctrAnnotation] + if !ok || automount == "" { + return nil, nil + } + + for _, imageName := range strings.Split(automount, ";") { + img, fullName, err := ic.Libpod.LibimageRuntime().LookupImage(imageName, nil) + if err != nil { + return nil, fmt.Errorf("image %s from container %s does not exist in local storage, cannot automount: %w", imageName, forContainer, err) + } + + logrus.Infof("Resolved image name %s to %s for automount into container %s", imageName, fullName, forContainer) + + inspect, err := img.Inspect(ctx, nil) + if err != nil { + return nil, fmt.Errorf("cannot inspect image %s to automount into container %s: %w", fullName, forContainer, err) + } + + volumes := inspect.Config.Volumes + + for path := range volumes { + if oldPath, ok := volMap[path]; ok && oldPath != nil { + 
logrus.Warnf("Multiple volume mounts to %q requested, overriding image %q with image %s", path, oldPath.Source, fullName) + } + + imgVol := new(specgen.ImageVolume) + imgVol.Source = fullName + imgVol.Destination = path + imgVol.ReadWrite = false + imgVol.SubPath = path + + volMap[path] = imgVol + } + } + + toReturn := make([]*specgen.ImageVolume, 0, len(volMap)) + for _, vol := range volMap { + toReturn = append(toReturn, vol) + } + + return toReturn, nil +} + func prepareVolumesFrom(forContainer, podName string, ctrNames, annotations map[string]string) ([]string, error) { annotationVolsFrom := define.VolumesFromAnnotation + "/" + forContainer @@ -289,11 +339,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options podTemplateSpec.ObjectMeta = podYAML.ObjectMeta podTemplateSpec.Spec = podYAML.Spec - for name, val := range podYAML.Annotations { - if len(val) > define.MaxKubeAnnotation && !options.UseLongAnnotations { - return nil, fmt.Errorf("annotation %q=%q value length exceeds Kubernetes max %d", name, val, define.MaxKubeAnnotation) - } - } + for name, val := range options.Annotations { if podYAML.Annotations == nil { podYAML.Annotations = make(map[string]string) @@ -301,6 +347,10 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options podYAML.Annotations[name] = val } + if err := annotations.ValidateAnnotations(podYAML.Annotations); err != nil { + return nil, err + } + r, proxies, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer) if err != nil { return nil, err @@ -827,6 +877,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY initCtrType = define.OneShotInitContainer } + automountImages, err := ic.prepareAutomountImages(ctx, initCtr.Name, annotations) + if err != nil { + return nil, nil, err + } + var volumesFrom []string if list, err := prepareVolumesFrom(initCtr.Name, podName, ctrNames, annotations); err != nil { return nil, nil, err @@ -855,6 +910,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY UserNSIsHost: p.Userns.IsHost(), Volumes: volumes, VolumesFrom: volumesFrom, + ImageVolumes: automountImages, UtsNSIsHost: p.UtsNs.IsHost(), } specGen, err := kube.ToSpecGen(ctx, &specgenOpts) @@ -911,6 +967,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY labels[k] = v } + automountImages, err := ic.prepareAutomountImages(ctx, container.Name, annotations) + if err != nil { + return nil, nil, err + } + var volumesFrom []string if list, err := prepareVolumesFrom(container.Name, podName, ctrNames, annotations); err != nil { return nil, nil, err @@ -933,12 +994,14 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY PodInfraID: podInfraID, PodName: podName, PodSecurityContext: podYAML.Spec.SecurityContext, + RestartPolicy: podSpec.PodSpecGen.RestartPolicy, // pass the restart policy to the container (https://github.com/containers/podman/issues/20903) ReadOnly: readOnly, SeccompPaths: seccompPaths, SecretsManager: secretsManager, UserNSIsHost: p.Userns.IsHost(), Volumes: volumes, VolumesFrom: volumesFrom, + ImageVolumes: automountImages, UtsNSIsHost: p.UtsNs.IsHost(), } @@ -1108,7 +1171,7 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, } else { if named, err := reference.ParseNamed(container.Image); err == nil { tagged, isTagged := named.(reference.NamedTagged) - if 
isTagged && tagged.Tag() == "latest" { + if !isTagged || tagged.Tag() == "latest" { // Make sure to always pull the latest image in case it got updated. pullPolicy = config.PullPolicyNewer } @@ -1441,6 +1504,7 @@ func sortKubeKinds(documentList [][]byte) ([][]byte, error) { return sortedDocumentList, nil } + func imageNamePrefix(imageName string) string { prefix := imageName s := strings.Split(prefix, ":") @@ -1463,7 +1527,7 @@ func getBuildFile(imageName string, cwd string) (string, error) { containerfilePath := filepath.Join(cwd, buildDirName, "Containerfile") dockerfilePath := filepath.Join(cwd, buildDirName, "Dockerfile") - _, err := os.Stat(containerfilePath) + err := fileutils.Exists(containerfilePath) if err == nil { logrus.Debugf("Building %s with %s", imageName, containerfilePath) return containerfilePath, nil @@ -1475,7 +1539,7 @@ func getBuildFile(imageName string, cwd string) (string, error) { logrus.Error(err.Error()) } - _, err = os.Stat(dockerfilePath) + err = fileutils.Exists(dockerfilePath) if err == nil { logrus.Debugf("Building %s with %s", imageName, dockerfilePath) return dockerfilePath, nil @@ -1600,7 +1664,10 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt } // Add the reports - reports.StopReport, err = ic.PodStop(ctx, podNames, entities.PodStopOptions{Ignore: true}) + reports.StopReport, err = ic.PodStop(ctx, podNames, entities.PodStopOptions{ + Ignore: true, + Timeout: -1, + }) if err != nil { return nil, err } diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go index 72a4ca0f6f..8d44532023 100644 --- a/pkg/domain/infra/abi/pods.go +++ b/pkg/domain/infra/abi/pods.go @@ -194,7 +194,10 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt return nil, err } for _, p := range pods { - report := entities.PodStopReport{Id: p.ID()} + report := entities.PodStopReport{ + Id: p.ID(), + RawInput: p.Name(), + } errs, err := p.StopWithTimeout(ctx, true, options.Timeout) if err != nil && !errors.Is(err, define.ErrPodPartialFail) { report.Errs = []error{err} @@ -247,7 +250,10 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op } for _, p := range pods { - report := entities.PodStartReport{Id: p.ID()} + report := entities.PodStartReport{ + Id: p.ID(), + RawInput: p.Name(), + } errs, err := p.Start(ctx) if err != nil && !errors.Is(err, define.ErrPodPartialFail) { report.Errs = []error{err} diff --git a/pkg/domain/infra/abi/pods_stats.go b/pkg/domain/infra/abi/pods_stats.go index 5576f7680f..8ece02c2c6 100644 --- a/pkg/domain/infra/abi/pods_stats.go +++ b/pkg/domain/infra/abi/pods_stats.go @@ -10,7 +10,6 @@ import ( "github.com/containers/podman/v5/libpod" "github.com/containers/podman/v5/pkg/domain/entities" "github.com/containers/podman/v5/pkg/rootless" - "github.com/containers/podman/v5/utils" "github.com/docker/go-units" ) @@ -85,12 +84,7 @@ func combineBytesValues(a, b uint64) string { } func floatToPercentString(f float64) string { - strippedFloat, err := utils.RemoveScientificNotationFromFloat(f) - if err != nil || strippedFloat == 0 { - // If things go bazinga, return a safe value - return "--" - } - return fmt.Sprintf("%.2f", strippedFloat) + "%" + return fmt.Sprintf("%.2f%%", f) } func pidsToString(pid uint64) string { diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go index 75dc5e65ab..2da7b6e5fc 100644 --- a/pkg/domain/infra/abi/system.go +++ b/pkg/domain/infra/abi/system.go @@ -15,6 +15,7 @@ import ( 
"github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -51,7 +52,7 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { } if uri.Scheme == "unix" { - _, err := os.Stat(uri.Path) + err := fileutils.Exists(uri.Path) info.Host.RemoteSocket.Exists = err == nil } else { info.Host.RemoteSocket.Exists = true @@ -336,3 +337,11 @@ func (ic ContainerEngine) Locks(ctx context.Context) (*entities.LocksReport, err report.LocksHeld = held return &report, nil } + +func (ic ContainerEngine) SystemCheck(ctx context.Context, options entities.SystemCheckOptions) (*entities.SystemCheckReport, error) { + report, err := ic.Libpod.SystemCheck(ctx, options) + if err != nil { + return nil, err + } + return &report, nil +} diff --git a/pkg/domain/infra/abi/system_freebsd.go b/pkg/domain/infra/abi/system_freebsd.go index c6ec91943e..1521a7e1a8 100644 --- a/pkg/domain/infra/abi/system_freebsd.go +++ b/pkg/domain/infra/abi/system_freebsd.go @@ -8,6 +8,6 @@ import ( const defaultRunPath = "/var/run" // SetupRootless in a NOP for freebsd as it only configures the rootless userns on linux. -func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error { +func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool, cgroupMode string) error { return nil } diff --git a/pkg/domain/infra/abi/system_linux.go b/pkg/domain/infra/abi/system_linux.go index abe00d89af..fe36ad88a7 100644 --- a/pkg/domain/infra/abi/system_linux.go +++ b/pkg/domain/infra/abi/system_linux.go @@ -17,7 +17,7 @@ import ( // Default path for system runtime state const defaultRunPath = "/run" -func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error { +func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool, cgroupMode string) error { runsUnderSystemd := systemd.RunsOnSystemd() if !runsUnderSystemd { isPid1 := os.Getpid() == 1 @@ -30,30 +30,33 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) } } - // do it only after podman has already re-execed and running with uid==0. - hasCapSysAdmin, err := unshare.HasCapSysAdmin() - if err != nil { - return err - } - // check for both euid == 0 and CAP_SYS_ADMIN because we may be running in a container with CAP_SYS_ADMIN set. - if os.Geteuid() == 0 && hasCapSysAdmin { - ownsCgroup, err := cgroups.UserOwnsCurrentSystemdCgroup() + configureCgroup := cgroupMode != "disabled" + if configureCgroup { + // do it only after podman has already re-execed and running with uid==0. + hasCapSysAdmin, err := unshare.HasCapSysAdmin() if err != nil { - logrus.Infof("Failed to detect the owner for the current cgroup: %v", err) + return err } - if !ownsCgroup { - conf, err := ic.Config(context.Background()) + // check for both euid == 0 and CAP_SYS_ADMIN because we may be running in a container with CAP_SYS_ADMIN set. 
+ if os.Geteuid() == 0 && hasCapSysAdmin { + ownsCgroup, err := cgroups.UserOwnsCurrentSystemdCgroup() if err != nil { - return err + logrus.Infof("Failed to detect the owner for the current cgroup: %v", err) } - unitName := fmt.Sprintf("podman-%d.scope", os.Getpid()) - if runsUnderSystemd || conf.Engine.CgroupManager == config.SystemdCgroupsManager { - if err := systemd.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil { - logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err) + if !ownsCgroup { + conf, err := ic.Config(context.Background()) + if err != nil { + return err + } + unitName := fmt.Sprintf("podman-%d.scope", os.Getpid()) + if runsUnderSystemd || conf.Engine.CgroupManager == config.SystemdCgroupsManager { + if err := systemd.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil { + logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err) + } } } + return nil } - return nil } pausePidPath, err := util.GetRootlessPauseProcessPidPath() diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go index 6c4d4494de..12e4d26aee 100644 --- a/pkg/domain/infra/runtime_libpod.go +++ b/pkg/domain/infra/runtime_libpod.go @@ -9,6 +9,7 @@ import ( "io/fs" "os" "os/signal" + "strings" "sync" "syscall" @@ -90,11 +91,23 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo storageOpts.RunRoot = cfg.Runroot } if fs.Changed("imagestore") { + storageSet = true storageOpts.ImageStore = cfg.ImageStore options = append(options, libpod.WithImageStore(cfg.ImageStore)) } - if len(storageOpts.RunRoot) > 50 { - return nil, errors.New("the specified runroot is longer than 50 characters") + if fs.Changed("pull-option") { + storageSet = true + storageOpts.PullOptions = make(map[string]string) + for _, v := range cfg.PullOptions { + if v == "" { + continue + } + val := strings.SplitN(v, "=", 2) + if len(val) != 2 { + return nil, fmt.Errorf("invalid pull option: %s", v) + } + storageOpts.PullOptions[val[0]] = val[1] + } } if fs.Changed("storage-driver") { storageSet = true diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index 3d33f179d6..a60b67e9e2 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -81,7 +81,7 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all, return filtered, rawInputs, nil } -func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]*entities.ListPodsReport, error) { +func getPodsByContext(contextWithConnection context.Context, all bool, ignore bool, namesOrIDs []string) ([]*entities.ListPodsReport, error) { if all && len(namesOrIDs) > 0 { return nil, errors.New("cannot look up specific pods and all") } @@ -108,6 +108,9 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrID inspectData, err := pods.Inspect(contextWithConnection, nameOrID, nil) if err != nil { if errorhandling.Contains(err, define.ErrNoSuchPod) { + if ignore { + continue + } return nil, fmt.Errorf("unable to find pod %q: %w", nameOrID, define.ErrNoSuchPod) } return nil, err @@ -126,6 +129,9 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrID } if !found { + if ignore { + continue + } return nil, fmt.Errorf("unable to find pod %q: %w", nameOrID, define.ErrNoSuchPod) } } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 9090a2b6db..2b8daeba78 100644 --- 
a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -273,6 +273,12 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri options.WithSkipTLSVerify(false) } } + if opts.Retry != nil { + options.WithRetry(*opts.Retry) + } + if opts.RetryDelay != "" { + options.WithRetryDelay(opts.RetryDelay) + } if err := images.Push(ir.ClientCtx, source, destination, options); err != nil { return nil, err } diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go index cdbfd63fc0..243f4b6b5d 100644 --- a/pkg/domain/infra/tunnel/manifest.go +++ b/pkg/domain/infra/tunnel/manifest.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "slices" "strings" "github.com/containers/image/v5/types" @@ -15,7 +16,7 @@ import ( // ManifestCreate implements manifest create via ImageEngine func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images []string, opts entities.ManifestCreateOptions) (string, error) { - options := new(manifests.CreateOptions).WithAll(opts.All).WithAmend(opts.Amend) + options := new(manifests.CreateOptions).WithAll(opts.All).WithAmend(opts.Amend).WithAnnotation(opts.Annotations) imageID, err := manifests.Create(ir.ClientCtx, name, images, options) if err != nil { return imageID, fmt.Errorf("creating manifest: %w", err) @@ -57,6 +58,13 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts en // ManifestAdd adds images to the manifest list func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames []string, opts entities.ManifestAddOptions) (string, error) { + imageNames = slices.Clone(imageNames) + for _, image := range opts.Images { + if !slices.Contains(imageNames, image) { + imageNames = append(imageNames, image) + } + } + options := new(manifests.AddOptions).WithAll(opts.All).WithArch(opts.Arch).WithVariant(opts.Variant) options.WithFeatures(opts.Features).WithImages(imageNames).WithOS(opts.OS).WithOSVersion(opts.OSVersion) options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile) @@ -89,6 +97,39 @@ func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames [] return id, nil } +// ManifestAddArtifact creates artifact manifests and adds them to the manifest list +func (ir *ImageEngine) ManifestAddArtifact(_ context.Context, name string, files []string, opts entities.ManifestAddArtifactOptions) (string, error) { + files = slices.Clone(files) + for _, file := range opts.Files { + if !slices.Contains(files, file) { + files = append(files, file) + } + } + options := new(manifests.AddArtifactOptions).WithArch(opts.Arch).WithVariant(opts.Variant) + options.WithFeatures(opts.Features).WithOS(opts.OS).WithOSVersion(opts.OSVersion).WithOSFeatures(opts.OSFeatures) + if len(opts.Annotation) != 0 { + annotations := make(map[string]string) + for _, annotationSpec := range opts.Annotation { + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return "", fmt.Errorf("no value given for annotation %q", key) + } + annotations[key] = val + } + options.WithAnnotation(annotations) + } + options.WithType(opts.Type).WithConfigType(opts.ConfigType).WithLayerType(opts.LayerType) + options.WithConfig(opts.Config) + options.WithExcludeTitles(opts.ExcludeTitles).WithSubject(opts.Subject) + options.WithAnnotations(opts.Annotations) + options.WithFiles(files) + id, err := manifests.AddArtifact(ir.ClientCtx, name, options) + if err != nil { + return id, fmt.Errorf("adding to manifest list %s: %w", 
name, err) + } + return id, nil +} + // ManifestAnnotate updates an entry of the manifest list func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, images string, opts entities.ManifestAnnotateOptions) (string, error) { options := new(manifests.ModifyOptions).WithArch(opts.Arch).WithVariant(opts.Variant) @@ -160,13 +201,13 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin // ManifestListClear clears out all instances from a manifest list func (ir *ImageEngine) ManifestListClear(ctx context.Context, name string) (string, error) { - listContents, err := manifests.InspectListData(ctx, name, &manifests.InspectOptions{}) + listContents, err := manifests.InspectListData(ir.ClientCtx, name, &manifests.InspectOptions{}) if err != nil { return "", err } for _, instance := range listContents.Manifests { - if _, err := manifests.Remove(ctx, name, instance.Digest.String(), &manifests.RemoveOptions{}); err != nil { + if _, err := manifests.Remove(ir.ClientCtx, name, instance.Digest.String(), &manifests.RemoveOptions{}); err != nil { return "", err } } diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go index e09e247a3b..bd48f6dcaa 100644 --- a/pkg/domain/infra/tunnel/network.go +++ b/pkg/domain/infra/tunnel/network.go @@ -22,9 +22,9 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, opts entities.Networ return network.List(ic.ClientCtx, options) } -func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, opts entities.InspectOptions) ([]types.Network, []error, error) { +func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, opts entities.InspectOptions) ([]entities.NetworkInspectReport, []error, error) { var ( - reports = make([]types.Network, 0, len(namesOrIds)) + reports = make([]entities.NetworkInspectReport, 0, len(namesOrIds)) errs = []error{} ) options := new(network.InspectOptions) diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go index f485b8a386..4a8a3c32ed 100644 --- a/pkg/domain/infra/tunnel/pods.go +++ b/pkg/domain/infra/tunnel/pods.go @@ -23,7 +23,7 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt return nil, err } - foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds) + foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, false, namesOrIds) if err != nil { return nil, err } @@ -55,7 +55,7 @@ func (ic *ContainerEngine) PodLogs(ctx context.Context, nameOrIDs string, option } func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) { - foundPods, err := getPodsByContext(ic.ClientCtx, options.All, namesOrIds) + foundPods, err := getPodsByContext(ic.ClientCtx, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -76,7 +76,7 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op } func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) { - foundPods, err := getPodsByContext(ic.ClientCtx, options.All, namesOrIds) + foundPods, err := getPodsByContext(ic.ClientCtx, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -102,8 +102,8 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opts entities.PodStopOptions) 
([]*entities.PodStopReport, error) { timeout := -1 - foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds) - if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) { + foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) + if err != nil { return nil, err } if opts.Timeout != -1 { @@ -115,8 +115,9 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt response, err := pods.Stop(ic.ClientCtx, p.Id, options) if err != nil { report := entities.PodStopReport{ - Errs: []error{err}, - Id: p.Id, + Errs: []error{err}, + Id: p.Id, + RawInput: p.Name, } reports = append(reports, &report) continue @@ -127,7 +128,7 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt } func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) { - foundPods, err := getPodsByContext(ic.ClientCtx, options.All, namesOrIds) + foundPods, err := getPodsByContext(ic.ClientCtx, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -148,7 +149,7 @@ func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, } func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) { - foundPods, err := getPodsByContext(ic.ClientCtx, options.All, namesOrIds) + foundPods, err := getPodsByContext(ic.ClientCtx, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -157,8 +158,9 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op response, err := pods.Start(ic.ClientCtx, p.Id, nil) if err != nil { report := entities.PodStartReport{ - Errs: []error{err}, - Id: p.Id, + Errs: []error{err}, + Id: p.Id, + RawInput: p.Name, } reports = append(reports, &report) continue @@ -169,8 +171,8 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op } func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, opts entities.PodRmOptions) ([]*entities.PodRmReport, error) { - foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds) - if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) { + foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) + if err != nil { return nil, err } reports := make([]*entities.PodRmReport, 0, len(foundPods)) diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go index 492fd0a894..025e902620 100644 --- a/pkg/domain/infra/tunnel/system.go +++ b/pkg/domain/infra/tunnel/system.go @@ -13,7 +13,7 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { return system.Info(ic.ClientCtx, nil) } -func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error { +func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool, cgroupMode string) error { panic(errors.New("rootless engine mode is not supported when tunneling")) } @@ -23,6 +23,15 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, opts entities.System return system.Prune(ic.ClientCtx, options) } +func (ic *ContainerEngine) SystemCheck(ctx context.Context, opts entities.SystemCheckOptions) (*entities.SystemCheckReport, error) { + options := new(system.CheckOptions).WithQuick(opts.Quick).WithRepair(opts.Repair).WithRepairLossy(opts.RepairLossy) + if opts.UnreferencedLayerMaximumAge != nil { + duration 
:= *opts.UnreferencedLayerMaximumAge + options = options.WithUnreferencedLayerMaximumAge(duration.String()) + } + return system.Check(ic.ClientCtx, options) +} + func (ic *ContainerEngine) Migrate(ctx context.Context, options entities.SystemMigrateOptions) error { return errors.New("runtime migration is not supported on remote clients") } diff --git a/pkg/env/env.go b/pkg/env/env.go index 6dbfb02017..a2165931ca 100644 --- a/pkg/env/env.go +++ b/pkg/env/env.go @@ -8,6 +8,8 @@ import ( "fmt" "os" "strings" + + "golang.org/x/exp/maps" ) const whiteSpaces = " \t" @@ -50,8 +52,9 @@ func Map(slice []string) map[string]string { // Join joins the two environment maps with override overriding base. func Join(base map[string]string, override map[string]string) map[string]string { if len(base) == 0 { - return override + return maps.Clone(override) } + base = maps.Clone(base) for k, v := range override { base[k] = v } diff --git a/pkg/farm/list_builder.go b/pkg/farm/list_builder.go index 8ac81ea29c..d8fce5e32f 100644 --- a/pkg/farm/list_builder.go +++ b/pkg/farm/list_builder.go @@ -26,7 +26,7 @@ type listLocal struct { options listBuilderOptions } -// newLocalManifestListBuilder returns a manifest list builder which saves a +// newManifestListBuilder returns a manifest list builder which saves a // manifest list and images to local storage. func newManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) *listLocal { return &listLocal{ diff --git a/pkg/machine/apple/apple.go b/pkg/machine/apple/apple.go new file mode 100644 index 0000000000..b69522524a --- /dev/null +++ b/pkg/machine/apple/apple.go @@ -0,0 +1,381 @@ +//go:build darwin + +package apple + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "os/exec" + "syscall" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/strongunits" + gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" + "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/ignition" + "github.com/containers/podman/v5/pkg/machine/sockets" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" + "github.com/containers/podman/v5/pkg/systemd/parser" + vfConfig "github.com/crc-org/vfkit/pkg/config" + "github.com/sirupsen/logrus" +) + +const applehvMACAddress = "5a:94:ef:e4:0c:ee" + +var ( + gvProxyWaitBackoff = 500 * time.Millisecond + gvProxyMaxBackoffAttempts = 6 + ignitionSocketName = "ignition.sock" +) + +// ResizeDisk uses os truncate to resize (only larger) a raw disk. 
the input size +// is assumed GiB +func ResizeDisk(mc *vmconfigs.MachineConfig, newSize strongunits.GiB) error { + logrus.Debugf("resizing %s to %d bytes", mc.ImagePath.GetPath(), newSize.ToBytes()) + return os.Truncate(mc.ImagePath.GetPath(), int64(newSize.ToBytes())) +} + +func SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.SetOptions, state define.Status) error { + if state != define.Stopped { + return errors.New("unable to change settings unless vm is stopped") + } + + if opts.DiskSize != nil { + if err := ResizeDisk(mc, *opts.DiskSize); err != nil { + return err + } + } + + if opts.Rootful != nil && mc.HostUser.Rootful != *opts.Rootful { + if err := mc.SetRootful(*opts.Rootful); err != nil { + return err + } + } + + if opts.USBs != nil { + return fmt.Errorf("changing USBs not supported for applehv machines") + } + + // VFKit does not require saving memory, disk, or cpu + return nil +} + +func GenerateSystemDFilesForVirtiofsMounts(mounts []machine.VirtIoFs) ([]ignition.Unit, error) { + // mounting in fcos with virtiofs is a bit of a dance. we need a unit file for the mount, a unit file + // for automatic mounting on boot, and a "preparatory" service file that disables FCOS security, performs + // the mkdir of the mount point, and then re-enables security. This must be done for each mount. + + unitFiles := make([]ignition.Unit, 0, len(mounts)) + for _, mnt := range mounts { + // Create mount unit for each mount + mountUnit := parser.NewUnitFile() + mountUnit.Add("Mount", "What", "%s") + mountUnit.Add("Mount", "Where", "%s") + mountUnit.Add("Mount", "Type", "virtiofs") + mountUnit.Add("Mount", "Options", fmt.Sprintf("context=\"%s\"", machine.NFSSELinuxContext)) + mountUnit.Add("Install", "WantedBy", "multi-user.target") + mountUnitFile, err := mountUnit.ToString() + if err != nil { + return nil, err + } + + virtiofsMount := ignition.Unit{ + Enabled: ignition.BoolToPtr(true), + Name: fmt.Sprintf("%s.mount", parser.PathEscape(mnt.Target)), + Contents: ignition.StrToPtr(fmt.Sprintf(mountUnitFile, mnt.Tag, mnt.Target)), + } + + unitFiles = append(unitFiles, virtiofsMount) + } + + // This is a way to workaround the FCOS limitation of creating directories + // at the rootfs / and then mounting to them. 
+ immutableRootOff := parser.NewUnitFile() + immutableRootOff.Add("Unit", "Description", "Allow systemd to create mount points on /") + immutableRootOff.Add("Unit", "DefaultDependencies", "no") + + immutableRootOff.Add("Service", "Type", "oneshot") + immutableRootOff.Add("Service", "ExecStart", "chattr -i /") + + immutableRootOff.Add("Install", "WantedBy", "remote-fs-pre.target") + immutableRootOffFile, err := immutableRootOff.ToString() + if err != nil { + return nil, err + } + + immutableRootOffUnit := ignition.Unit{ + Contents: ignition.StrToPtr(immutableRootOffFile), + Name: "immutable-root-off.service", + Enabled: ignition.BoolToPtr(true), + } + unitFiles = append(unitFiles, immutableRootOffUnit) + + immutableRootOn := parser.NewUnitFile() + immutableRootOn.Add("Unit", "Description", "Set / back to immutable after mounts are done") + immutableRootOn.Add("Unit", "DefaultDependencies", "no") + immutableRootOn.Add("Unit", "After", "remote-fs.target") + + immutableRootOn.Add("Service", "Type", "oneshot") + immutableRootOn.Add("Service", "ExecStart", "chattr +i /") + + immutableRootOn.Add("Install", "WantedBy", "remote-fs.target") + immutableRootOnFile, err := immutableRootOn.ToString() + if err != nil { + return nil, err + } + + immutableRootOnUnit := ignition.Unit{ + Contents: ignition.StrToPtr(immutableRootOnFile), + Name: "immutable-root-on.service", + Enabled: ignition.BoolToPtr(true), + } + unitFiles = append(unitFiles, immutableRootOnUnit) + + return unitFiles, nil +} + +// StartGenericAppleVM is wrappered by apple provider methods and starts the vm +func StartGenericAppleVM(mc *vmconfigs.MachineConfig, cmdBinary string, bootloader vfConfig.Bootloader, endpoint string) (func() error, func() error, error) { + var ( + ignitionSocket *define.VMFile + ) + + // Add networking + netDevice, err := vfConfig.VirtioNetNew(applehvMACAddress) + if err != nil { + return nil, nil, err + } + // Set user networking with gvproxy + + gvproxySocket, err := mc.GVProxySocket() + if err != nil { + return nil, nil, err + } + + // Wait on gvproxy to be running and aware + if err := sockets.WaitForSocketWithBackoffs(gvProxyMaxBackoffAttempts, gvProxyWaitBackoff, gvproxySocket.GetPath(), "gvproxy"); err != nil { + return nil, nil, err + } + + netDevice.SetUnixSocketPath(gvproxySocket.GetPath()) + + // create a one-time virtual machine for starting because we dont want all this information in the + // machineconfig if possible. the preference was to derive this stuff + vm := vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), uint64(mc.Resources.Memory), bootloader) + + defaultDevices, readySocket, err := GetDefaultDevices(mc) + if err != nil { + return nil, nil, err + } + + vm.Devices = append(vm.Devices, defaultDevices...) + vm.Devices = append(vm.Devices, netDevice) + + mounts, err := VirtIOFsToVFKitVirtIODevice(mc.Mounts) + if err != nil { + return nil, nil, err + } + vm.Devices = append(vm.Devices, mounts...) 
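GenerateSystemDFilesForVirtiofsMounts above emits one systemd .mount unit per virtiofs mount (What is the tag, Where is the guest path, Options carries an SELinux context) plus oneshot "immutable-root-off/on" services that run chattr -i / and chattr +i / around mount-point creation. A rough stdlib-only sketch of the rendered .mount text; the helper name is illustrative, the context value shown is the one from the removed applehv code, and the real patch builds the unit through its parser package and machine.NFSSELinuxContext instead:

package main

import "fmt"

// virtiofsMountUnit renders text shaped like the .mount unit assembled above.
// The patch names the unit after the escaped mount path (parser.PathEscape(target) + ".mount").
func virtiofsMountUnit(tag, target, selinuxContext string) string {
	return fmt.Sprintf(`[Mount]
What=%s
Where=%s
Type=virtiofs
Options=context=%q

[Install]
WantedBy=multi-user.target
`, tag, target, selinuxContext)
}

func main() {
	fmt.Print(virtiofsMountUnit("vol0", "/mnt/vol0", "system_u:object_r:nfs_t:s0"))
}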
+ + // To start the VM, we need to call vfkit + cfg, err := config.Default() + if err != nil { + return nil, nil, err + } + + cmdBinaryPath, err := cfg.FindHelperBinary(cmdBinary, true) + if err != nil { + return nil, nil, err + } + + logrus.Debugf("helper binary path is: %s", cmdBinaryPath) + + cmd, err := vm.Cmd(cmdBinaryPath) + if err != nil { + return nil, nil, err + } + if logrus.IsLevelEnabled(logrus.DebugLevel) { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + + endpointArgs, err := GetVfKitEndpointCMDArgs(endpoint) + if err != nil { + return nil, nil, err + } + + machineDataDir, err := mc.DataDir() + if err != nil { + return nil, nil, err + } + + cmd.Args = append(cmd.Args, endpointArgs...) + + firstBoot, err := mc.IsFirstBoot() + if err != nil { + return nil, nil, err + } + + if logrus.IsLevelEnabled(logrus.DebugLevel) { + debugDevArgs, err := GetDebugDevicesCMDArgs() + if err != nil { + return nil, nil, err + } + cmd.Args = append(cmd.Args, debugDevArgs...) + cmd.Args = append(cmd.Args, "--gui") // add command line switch to pop the gui open + } + + if firstBoot { + // If this is the first boot of the vm, we need to add the vsock + // device to vfkit so we can inject the ignition file + socketName := fmt.Sprintf("%s-%s", mc.Name, ignitionSocketName) + ignitionSocket, err = machineDataDir.AppendToNewVMFile(socketName, &socketName) + if err != nil { + return nil, nil, err + } + if err := ignitionSocket.Delete(); err != nil { + logrus.Errorf("unable to delete ignition socket: %q", err) + } + + ignitionVsockDeviceCLI, err := GetIgnitionVsockDeviceAsCLI(ignitionSocket.GetPath()) + if err != nil { + return nil, nil, err + } + cmd.Args = append(cmd.Args, ignitionVsockDeviceCLI...) + + logrus.Debug("first boot detected") + logrus.Debugf("serving ignition file over %s", ignitionSocket.GetPath()) + go func() { + if err := ServeIgnitionOverSock(ignitionSocket, mc); err != nil { + logrus.Error(err) + } + logrus.Debug("ignition vsock server exited") + }() + } + + logrus.Debugf("listening for ready on: %s", readySocket.GetPath()) + if err := readySocket.Delete(); err != nil { + logrus.Warnf("unable to delete previous ready socket: %q", err) + } + readyListen, err := net.Listen("unix", readySocket.GetPath()) + if err != nil { + return nil, nil, err + } + + logrus.Debug("waiting for ready notification") + readyChan := make(chan error) + go sockets.ListenAndWaitOnSocket(readyChan, readyListen) + + logrus.Debugf("helper command-line: %v", cmd.Args) + + if mc.LibKrunHypervisor != nil && logrus.IsLevelEnabled(logrus.DebugLevel) { + rtDir, err := mc.RuntimeDir() + if err != nil { + return nil, nil, err + } + kdFile, err := rtDir.AppendToNewVMFile("krunkit-debug.sh", nil) + if err != nil { + return nil, nil, err + } + f, err := os.Create(kdFile.Path) + if err != nil { + return nil, nil, err + } + err = os.Chmod(kdFile.Path, 0744) + if err != nil { + return nil, nil, err + } + + _, err = f.WriteString("#!/bin/sh\nexec ") + if err != nil { + return nil, nil, err + } + for _, arg := range cmd.Args { + _, err = f.WriteString(fmt.Sprintf("%q ", arg)) + if err != nil { + return nil, nil, err + } + } + err = f.Close() + if err != nil { + return nil, nil, err + } + + cmd = exec.Command("/usr/bin/open", "-Wa", "Terminal", kdFile.Path) + } + + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + returnFunc := func() error { + processErrChan := make(chan error) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer close(processErrChan) + for { + 
select { + case <-ctx.Done(): + return + default: + } + if err := CheckProcessRunning(cmdBinary, cmd.Process.Pid); err != nil { + processErrChan <- err + return + } + // lets poll status every half second + time.Sleep(500 * time.Millisecond) + } + }() + + // wait for either socket or to be ready or process to have exited + select { + case err := <-processErrChan: + if err != nil { + return err + } + case err := <-readyChan: + if err != nil { + return err + } + logrus.Debug("ready notification received") + } + return nil + } + return cmd.Process.Release, returnFunc, nil +} + +// CheckProcessRunning checks non blocking if the pid exited +// returns nil if process is running otherwise an error if not +func CheckProcessRunning(processName string, pid int) error { + var status syscall.WaitStatus + pid, err := syscall.Wait4(pid, &status, syscall.WNOHANG, nil) + if err != nil { + return fmt.Errorf("failed to read %s process status: %w", processName, err) + } + if pid > 0 { + // child exited + return fmt.Errorf("%s exited unexpectedly with exit code %d", processName, status.ExitStatus()) + } + return nil +} + +// StartGenericNetworking is wrappered by apple provider methods +func StartGenericNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error { + gvProxySock, err := mc.GVProxySocket() + if err != nil { + return err + } + // make sure it does not exist before gvproxy is called + if err := gvProxySock.Delete(); err != nil { + logrus.Error(err) + } + cmd.AddVfkitSocket(fmt.Sprintf("unixgram://%s", gvProxySock.GetPath())) + return nil +} diff --git a/pkg/machine/applehv/ignition.go b/pkg/machine/apple/ignition.go similarity index 87% rename from pkg/machine/applehv/ignition.go rename to pkg/machine/apple/ignition.go index 4453532182..7d292d232d 100644 --- a/pkg/machine/applehv/ignition.go +++ b/pkg/machine/apple/ignition.go @@ -1,6 +1,6 @@ //go:build darwin -package applehv +package apple import ( "net" @@ -11,9 +11,9 @@ import ( "github.com/sirupsen/logrus" ) -// serveIgnitionOverSock allows podman to open a small httpd instance on the vsock between the host +// ServeIgnitionOverSock allows podman to open a small httpd instance on the vsock between the host // and guest to inject the ignitionfile into fcos -func serveIgnitionOverSock(ignitionSocket *define.VMFile, mc *vmconfigs.MachineConfig) error { +func ServeIgnitionOverSock(ignitionSocket *define.VMFile, mc *vmconfigs.MachineConfig) error { ignitionFile, err := mc.IgnitionFile() if err != nil { return err diff --git a/pkg/machine/apple/vfkit.go b/pkg/machine/apple/vfkit.go new file mode 100644 index 0000000000..301167a78d --- /dev/null +++ b/pkg/machine/apple/vfkit.go @@ -0,0 +1,141 @@ +//go:build darwin + +package apple + +import ( + "errors" + + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" + vfConfig "github.com/crc-org/vfkit/pkg/config" + "github.com/crc-org/vfkit/pkg/rest" + "github.com/sirupsen/logrus" +) + +func GetDefaultDevices(mc *vmconfigs.MachineConfig) ([]vfConfig.VirtioDevice, *define.VMFile, error) { + var devices []vfConfig.VirtioDevice + + disk, err := vfConfig.VirtioBlkNew(mc.ImagePath.GetPath()) + if err != nil { + return nil, nil, err + } + rng, err := vfConfig.VirtioRngNew() + if err != nil { + return nil, nil, err + } + + logfile, err := mc.LogFile() + if err != nil { + return nil, nil, err + } + serial, err := vfConfig.VirtioSerialNew(logfile.GetPath()) + if err != nil { + return nil, nil, err + } + + readySocket, err := mc.ReadySocket() 
+ if err != nil { + return nil, nil, err + } + + readyDevice, err := vfConfig.VirtioVsockNew(1025, readySocket.GetPath(), true) + if err != nil { + return nil, nil, err + } + devices = append(devices, disk, rng, readyDevice) + if mc.LibKrunHypervisor == nil || !logrus.IsLevelEnabled(logrus.DebugLevel) { + // If libkrun is the provider and we want to show the debug console, + // don't add a virtio serial device to avoid redirecting the output. + devices = append(devices, serial) + } + + if mc.AppleHypervisor != nil && mc.AppleHypervisor.Vfkit.Rosetta { + rosetta := &vfConfig.RosettaShare{ + DirectorySharingConfig: vfConfig.DirectorySharingConfig{ + MountTag: define.MountTag, + }, + InstallRosetta: true, + } + devices = append(devices, rosetta) + } + + return devices, readySocket, nil +} + +func GetDebugDevices() ([]vfConfig.VirtioDevice, error) { + var devices []vfConfig.VirtioDevice + gpu, err := vfConfig.VirtioGPUNew() + if err != nil { + return nil, err + } + mouse, err := vfConfig.VirtioInputNew(vfConfig.VirtioInputPointingDevice) + if err != nil { + return nil, err + } + kb, err := vfConfig.VirtioInputNew(vfConfig.VirtioInputKeyboardDevice) + if err != nil { + return nil, err + } + return append(devices, gpu, mouse, kb), nil +} + +func GetIgnitionVsockDevice(path string) (vfConfig.VirtioDevice, error) { + return vfConfig.VirtioVsockNew(1024, path, true) +} + +func VirtIOFsToVFKitVirtIODevice(mounts []*vmconfigs.Mount) ([]vfConfig.VirtioDevice, error) { + virtioDevices := make([]vfConfig.VirtioDevice, 0, len(mounts)) + for _, vol := range mounts { + virtfsDevice, err := vfConfig.VirtioFsNew(vol.Source, vol.Tag) + if err != nil { + return nil, err + } + virtioDevices = append(virtioDevices, virtfsDevice) + } + return virtioDevices, nil +} + +// GetVfKitEndpointCMDArgs converts the vfkit endpoint to a cmdline format +func GetVfKitEndpointCMDArgs(endpoint string) ([]string, error) { + if len(endpoint) == 0 { + return nil, errors.New("endpoint cannot be empty") + } + restEndpoint, err := rest.NewEndpoint(endpoint) + if err != nil { + return nil, err + } + return restEndpoint.ToCmdLine() +} + +// GetIgnitionVsockDeviceAsCLI retrieves the ignition vsock device and converts +// it to a cmdline format +func GetIgnitionVsockDeviceAsCLI(ignitionSocketPath string) ([]string, error) { + ignitionVsockDevice, err := GetIgnitionVsockDevice(ignitionSocketPath) + if err != nil { + return nil, err + } + // Convert the device into cli args + ignitionVsockDeviceCLI, err := ignitionVsockDevice.ToCmdLine() + if err != nil { + return nil, err + } + return ignitionVsockDeviceCLI, nil +} + +// GetDebugDevicesCMDArgs retrieves the debug devices and converts them to a +// cmdline format +func GetDebugDevicesCMDArgs() ([]string, error) { + args := []string{} + debugDevices, err := GetDebugDevices() + if err != nil { + return nil, err + } + for _, debugDevice := range debugDevices { + debugCli, err := debugDevice.ToCmdLine() + if err != nil { + return nil, err + } + args = append(args, debugCli...) 
+ } + return args, nil +} diff --git a/pkg/machine/apple/vfkit/helper.go b/pkg/machine/apple/vfkit/helper.go new file mode 100644 index 0000000000..f971129b16 --- /dev/null +++ b/pkg/machine/apple/vfkit/helper.go @@ -0,0 +1,122 @@ +//go:build darwin + +package vfkit + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "time" + + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/crc-org/vfkit/pkg/config" + rest "github.com/crc-org/vfkit/pkg/rest/define" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + inspect = "/vm/inspect" + state = "/vm/state" + version = "/version" +) + +func (vf *Helper) get(endpoint string, payload io.Reader) (*http.Response, error) { + client := &http.Client{} + req, err := http.NewRequest(http.MethodGet, endpoint, payload) + if err != nil { + return nil, err + } + return client.Do(req) +} + +func (vf *Helper) post(endpoint string, payload io.Reader) (*http.Response, error) { + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, endpoint, payload) + if err != nil { + return nil, err + } + return client.Do(req) +} + +// getRawState asks vfkit for virtual machine state unmodified (see state()) +func (vf *Helper) getRawState() (define.Status, error) { + var response rest.VMState + endPoint := vf.Endpoint + state + serverResponse, err := vf.get(endPoint, nil) + if err != nil { + if errors.Is(err, unix.ECONNREFUSED) { + logrus.Debugf("connection refused: %s", endPoint) + } + return "", err + } + err = json.NewDecoder(serverResponse.Body).Decode(&response) + if err != nil { + return "", err + } + if err := serverResponse.Body.Close(); err != nil { + logrus.Error(err) + } + return ToMachineStatus(response.State) +} + +// state asks vfkit for the virtual machine state. 
in case the vfkit +// service is not responding, we assume the service is not running +// and return a stopped status +func (vf *Helper) State() (define.Status, error) { + vmState, err := vf.getRawState() + if err == nil { + return vmState, nil + } + if errors.Is(err, unix.ECONNREFUSED) { + return define.Stopped, nil + } + return "", err +} + +func (vf *Helper) stateChange(newState rest.StateChange) error { + b, err := json.Marshal(rest.VMState{State: string(newState)}) + if err != nil { + return err + } + payload := bytes.NewReader(b) + serverResponse, err := vf.post(vf.Endpoint+state, payload) + _ = serverResponse.Body.Close() + return err +} + +func (vf *Helper) Stop(force, wait bool) error { + state := rest.Stop + if force { + state = rest.HardStop + } + if err := vf.stateChange(state); err != nil { + return err + } + if !wait { + return nil + } + waitDuration := time.Millisecond * 500 + // Wait up to 90s then hard force off + for i := 0; i < 180; i++ { + _, err := vf.getRawState() + if err != nil || errors.Is(err, unix.ECONNREFUSED) { + return nil + } + time.Sleep(waitDuration) + } + logrus.Warn("Failed to gracefully stop machine, performing hard stop") + // we waited long enough do a hard stop + return vf.stateChange(rest.HardStop) +} + +// Helper describes the use of vfkit: cmdline and endpoint +type Helper struct { + LogLevel logrus.Level + Endpoint string + BinaryPath *define.VMFile + VirtualMachine *config.VirtualMachine + Rosetta bool +} diff --git a/pkg/machine/applehv/vfkit/rest.go b/pkg/machine/apple/vfkit/rest.go similarity index 100% rename from pkg/machine/applehv/vfkit/rest.go rename to pkg/machine/apple/vfkit/rest.go diff --git a/pkg/machine/applehv/machine.go b/pkg/machine/applehv/machine.go index 4d9c0b68cc..583db59b38 100644 --- a/pkg/machine/applehv/machine.go +++ b/pkg/machine/applehv/machine.go @@ -3,70 +3,14 @@ package applehv import ( - "fmt" - "os" - "syscall" - - "github.com/containers/common/pkg/strongunits" - "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" - "github.com/containers/podman/v5/pkg/machine/ignition" "github.com/containers/podman/v5/pkg/machine/vmconfigs" - "github.com/containers/podman/v5/pkg/systemd/parser" - vfRest "github.com/crc-org/vfkit/pkg/rest" - "github.com/sirupsen/logrus" ) func (a *AppleHVStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, error) { - mc.Lock() - defer mc.Unlock() - - // TODO we could delete the vfkit pid/log files if we wanted to be thorough return []string{}, func() error { return nil }, nil } -// getIgnitionVsockDeviceAsCLI retrieves the ignition vsock device and converts -// it to a cmdline format -func getIgnitionVsockDeviceAsCLI(ignitionSocketPath string) ([]string, error) { - ignitionVsockDevice, err := getIgnitionVsockDevice(ignitionSocketPath) - if err != nil { - return nil, err - } - // Convert the device into cli args - ignitionVsockDeviceCLI, err := ignitionVsockDevice.ToCmdLine() - if err != nil { - return nil, err - } - return ignitionVsockDeviceCLI, nil -} - -// getDebugDevicesCMDArgs retrieves the debug devices and converts them to a -// cmdline format -func getDebugDevicesCMDArgs() ([]string, error) { - args := []string{} - debugDevices, err := getDebugDevices() - if err != nil { - return nil, err - } - for _, debugDevice := range debugDevices { - debugCli, err := debugDevice.ToCmdLine() - if err != nil { - return nil, err - } - args = append(args, debugCli...) 
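Helper.Stop above asks vfkit's REST endpoint for a graceful stop, polls the VM state every 500ms for up to roughly 90 seconds, and falls back to a hard stop if the machine never goes away. A stdlib-only sketch of that stop, poll, then force shape; the callbacks stand in for the REST calls made in the patch:

package main

import (
	"fmt"
	"time"
)

// stopWithDeadline requests a graceful stop, waits for the target to disappear,
// and forces a stop once the polling budget is exhausted, mirroring Helper.Stop above.
func stopWithDeadline(stop, forceStop func() error, isGone func() bool, interval time.Duration, attempts int) error {
	if err := stop(); err != nil {
		return err
	}
	for i := 0; i < attempts; i++ {
		if isGone() {
			return nil
		}
		time.Sleep(interval)
	}
	// Waited long enough; hard-stop, as the patch does after its 90s budget.
	return forceStop()
}

func main() {
	deadline := time.Now().Add(200 * time.Millisecond)
	err := stopWithDeadline(
		func() error { fmt.Println("graceful stop requested"); return nil },
		func() error { fmt.Println("hard stop"); return nil },
		func() bool { return time.Now().After(deadline) },
		50*time.Millisecond,
		180, // the patch polls every 500ms up to 180 times
	)
	fmt.Println("err:", err)
}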
- } - return args, nil -} - -// getVfKitEndpointCMDArgs converts the vfkit endpoint to a cmdline format -func getVfKitEndpointCMDArgs(endpoint string) ([]string, error) { - restEndpoint, err := vfRest.NewEndpoint(endpoint) - if err != nil { - return nil, err - } - return restEndpoint.ToCmdLine() -} - func (a *AppleHVStubber) State(mc *vmconfigs.MachineConfig, _ bool) (define.Status, error) { vmStatus, err := mc.AppleHypervisor.Vfkit.State() if err != nil { @@ -76,106 +20,5 @@ func (a *AppleHVStubber) State(mc *vmconfigs.MachineConfig, _ bool) (define.Stat } func (a *AppleHVStubber) StopVM(mc *vmconfigs.MachineConfig, _ bool) error { - mc.Lock() - defer mc.Unlock() return mc.AppleHypervisor.Vfkit.Stop(false, true) } - -// checkProcessRunning checks non blocking if the pid exited -// returns nil if process is running otherwise an error if not -func checkProcessRunning(processName string, pid int) error { - var status syscall.WaitStatus - pid, err := syscall.Wait4(pid, &status, syscall.WNOHANG, nil) - if err != nil { - return fmt.Errorf("failed to read %s process status: %w", processName, err) - } - if pid > 0 { - // child exited - return fmt.Errorf("%s exited unexpectedly with exit code %d", processName, status.ExitStatus()) - } - return nil -} - -// resizeDisk uses os truncate to resize (only larger) a raw disk. the input size -// is assumed GiB -func resizeDisk(mc *vmconfigs.MachineConfig, newSize strongunits.GiB) error { - logrus.Debugf("resizing %s to %d bytes", mc.ImagePath.GetPath(), newSize.ToBytes()) - return os.Truncate(mc.ImagePath.GetPath(), int64(newSize.ToBytes())) -} - -func generateSystemDFilesForVirtiofsMounts(mounts []machine.VirtIoFs) []ignition.Unit { - // mounting in fcos with virtiofs is a bit of a dance. we need a unit file for the mount, a unit file - // for automatic mounting on boot, and a "preparatory" service file that disables FCOS security, performs - // the mkdir of the mount point, and then re-enables security. This must be done for each mount. - - var unitFiles []ignition.Unit - for _, mnt := range mounts { - // Here we are looping the mounts and for each mount, we are adding two unit files - // for virtiofs. One unit file is the mount itself and the second is to automount it - // on boot. 
- autoMountUnit := parser.NewUnitFile() - autoMountUnit.Add("Automount", "Where", "%s") - autoMountUnit.Add("Install", "WantedBy", "multi-user.target") - autoMountUnit.Add("Unit", "Description", "Mount virtiofs volume %s") - autoMountUnitFile, err := autoMountUnit.ToString() - if err != nil { - logrus.Warnf(err.Error()) - } - - mountUnit := parser.NewUnitFile() - mountUnit.Add("Mount", "What", "%s") - mountUnit.Add("Mount", "Where", "%s") - mountUnit.Add("Mount", "Type", "virtiofs") - mountUnit.Add("Mount", "Options", "defcontext=\"system_u:object_r:nfs_t:s0\"") - mountUnit.Add("Install", "WantedBy", "multi-user.target") - mountUnitFile, err := mountUnit.ToString() - if err != nil { - logrus.Warnf(err.Error()) - } - - virtiofsAutomount := ignition.Unit{ - Enabled: ignition.BoolToPtr(true), - Name: fmt.Sprintf("%s.automount", mnt.Tag), - Contents: ignition.StrToPtr(fmt.Sprintf(autoMountUnitFile, mnt.Target, mnt.Target)), - } - virtiofsMount := ignition.Unit{ - Enabled: ignition.BoolToPtr(true), - Name: fmt.Sprintf("%s.mount", mnt.Tag), - Contents: ignition.StrToPtr(fmt.Sprintf(mountUnitFile, mnt.Tag, mnt.Target)), - } - - // This "unit" simulates something like systemctl enable virtiofs-mount-prepare@ - enablePrep := ignition.Unit{ - Enabled: ignition.BoolToPtr(true), - Name: fmt.Sprintf("virtiofs-mount-prepare@%s.service", mnt.Tag), - } - - unitFiles = append(unitFiles, virtiofsAutomount, virtiofsMount, enablePrep) - } - - // mount prep is a way to workaround the FCOS limitation of creating directories - // at the rootfs / and then mounting to them. - mountPrep := parser.NewUnitFile() - mountPrep.Add("Unit", "Description", "Allow virtios to mount to /") - mountPrep.Add("Unit", "DefaultDependencies", "no") - mountPrep.Add("Unit", "ConditionPathExists", "!%f") - - mountPrep.Add("Service", "Type", "oneshot") - mountPrep.Add("Service", "ExecStartPre", "chattr -i /") - mountPrep.Add("Service", "ExecStart", "mkdir -p '%f'") - mountPrep.Add("Service", "ExecStopPost", "chattr +i /") - - mountPrep.Add("Install", "WantedBy", "remote-fs.target") - mountPrepFile, err := mountPrep.ToString() - if err != nil { - logrus.Warnf(err.Error()) - } - - virtioFSChattr := ignition.Unit{ - Contents: ignition.StrToPtr(mountPrepFile), - Name: "virtiofs-mount-prepare@.service", - } - unitFiles = append(unitFiles, virtioFSChattr) - - return unitFiles -} diff --git a/pkg/machine/applehv/stubber.go b/pkg/machine/applehv/stubber.go index 910a3dbbaa..b46767e798 100644 --- a/pkg/machine/applehv/stubber.go +++ b/pkg/machine/applehv/stubber.go @@ -3,37 +3,28 @@ package applehv import ( - "context" - "errors" "fmt" - "net" + "runtime" "strconv" - "time" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/strongunits" gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" "github.com/containers/podman/v5/pkg/machine" - "github.com/containers/podman/v5/pkg/machine/applehv/vfkit" + "github.com/containers/podman/v5/pkg/machine/apple" + "github.com/containers/podman/v5/pkg/machine/apple/vfkit" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/pkg/machine/ignition" "github.com/containers/podman/v5/pkg/machine/shim/diskpull" - "github.com/containers/podman/v5/pkg/machine/sockets" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/utils" vfConfig "github.com/crc-org/vfkit/pkg/config" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // applehcMACAddress is a pre-defined mac address that vfkit recognizes // and is required 
for network flow -const applehvMACAddress = "5a:94:ef:e4:0c:ee" var ( - vfkitCommand = "vfkit" - gvProxyWaitBackoff = 500 * time.Millisecond - gvProxyMaxBackoffAttempts = 6 + vfkitCommand = "vfkit" ) type AppleHVStubber struct { @@ -54,9 +45,9 @@ func (a AppleHVStubber) RequireExclusiveActive() bool { func (a AppleHVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConfig, ignBuilder *ignition.IgnitionBuilder) error { mc.AppleHypervisor = new(vmconfigs.AppleHVConfig) - mc.AppleHypervisor.Vfkit = vfkit.VfkitHelper{} + mc.AppleHypervisor.Vfkit = vfkit.Helper{} bl := vfConfig.NewEFIBootloader(fmt.Sprintf("%s/efi-bl-%s", opts.Dirs.DataDir.GetPath(), opts.Name), true) - mc.AppleHypervisor.Vfkit.VirtualMachine = vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), mc.Resources.Memory, bl) + mc.AppleHypervisor.Vfkit.VirtualMachine = vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), uint64(mc.Resources.Memory), bl) randPort, err := utils.GetRandomPort() if err != nil { @@ -64,15 +55,29 @@ func (a AppleHVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.Machine } mc.AppleHypervisor.Vfkit.Endpoint = localhostURI + ":" + strconv.Itoa(randPort) - var virtiofsMounts []machine.VirtIoFs + virtiofsMounts := make([]machine.VirtIoFs, 0, len(mc.Mounts)) for _, mnt := range mc.Mounts { virtiofsMounts = append(virtiofsMounts, machine.MountToVirtIOFs(mnt)) } // Populate the ignition file with virtiofs stuff - ignBuilder.WithUnit(generateSystemDFilesForVirtiofsMounts(virtiofsMounts)...) + virtIOIgnitionMounts, err := apple.GenerateSystemDFilesForVirtiofsMounts(virtiofsMounts) + if err != nil { + return err + } + ignBuilder.WithUnit(virtIOIgnitionMounts...) - return resizeDisk(mc, strongunits.GiB(mc.Resources.DiskSize)) + cfg, err := config.Default() + if err != nil { + return err + } + rosetta := cfg.Machine.Rosetta + if runtime.GOARCH != "arm64" { + rosetta = false + } + mc.AppleHypervisor.Vfkit.Rosetta = rosetta + + return apple.ResizeDisk(mc, mc.Resources.DiskSize) } func (a AppleHVStubber) Exists(name string) (bool, error) { @@ -94,245 +99,49 @@ func (a AppleHVStubber) RemoveAndCleanMachines(_ *define.MachineDirs) error { } func (a AppleHVStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.SetOptions) error { - mc.Lock() - defer mc.Unlock() - state, err := a.State(mc, false) if err != nil { return err } - if state != define.Stopped { - return errors.New("unable to change settings unless vm is stopped") - } - - if opts.DiskSize != nil { - if err := resizeDisk(mc, *opts.DiskSize); err != nil { - return err - } - } - - if opts.Rootful != nil && mc.HostUser.Rootful != *opts.Rootful { - if err := mc.SetRootful(*opts.Rootful); err != nil { - return err - } - } - - if opts.USBs != nil { - return fmt.Errorf("changing USBs not supported for applehv machines") - } - - // VFKit does not require saving memory, disk, or cpu - return nil + return apple.SetProviderAttrs(mc, opts, state) } func (a AppleHVStubber) StartNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error { - gvProxySock, err := mc.GVProxySocket() - if err != nil { - return err - } - // make sure it does not exist before gvproxy is called - if err := gvProxySock.Delete(); err != nil { - logrus.Error(err) - } - cmd.AddVfkitSocket(fmt.Sprintf("unixgram://%s", gvProxySock.GetPath())) - return nil + return apple.StartGenericNetworking(mc, cmd) } func (a AppleHVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) { - var ( - ignitionSocket *define.VMFile - ) - - if bl := 
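// The Rosetta handling added to CreateVM above honors the containers.conf
// machine setting only on Apple silicon hosts; elsewhere it is forced off
// before being persisted in the machine config. A tiny sketch of that gate;
// the function name is illustrative.
package main

import (
	"fmt"
	"runtime"
)

func effectiveRosetta(configured bool) bool {
	// Rosetta x86_64 translation only exists on arm64 (Apple silicon) hosts.
	if runtime.GOARCH != "arm64" {
		return false
	}
	return configured
}

func main() {
	fmt.Println(effectiveRosetta(true))
}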
mc.AppleHypervisor.Vfkit.VirtualMachine.Bootloader; bl == nil { + bl := mc.AppleHypervisor.Vfkit.VirtualMachine.Bootloader + if bl == nil { return nil, nil, fmt.Errorf("unable to determine boot loader for this machine") } - // Add networking - netDevice, err := vfConfig.VirtioNetNew(applehvMACAddress) - if err != nil { - return nil, nil, err - } - // Set user networking with gvproxy - - gvproxySocket, err := mc.GVProxySocket() - if err != nil { - return nil, nil, err - } - - // Wait on gvproxy to be running and aware - if err := waitForGvProxy(gvproxySocket); err != nil { - return nil, nil, err - } - - netDevice.SetUnixSocketPath(gvproxySocket.GetPath()) - - // create a one-time virtual machine for starting because we dont want all this information in the - // machineconfig if possible. the preference was to derive this stuff - vm := vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), mc.Resources.Memory, mc.AppleHypervisor.Vfkit.VirtualMachine.Bootloader) - - defaultDevices, readySocket, err := getDefaultDevices(mc) - if err != nil { - return nil, nil, err - } - - vm.Devices = append(vm.Devices, defaultDevices...) - vm.Devices = append(vm.Devices, netDevice) - - mounts, err := virtIOFsToVFKitVirtIODevice(mc.Mounts) - if err != nil { - return nil, nil, err - } - vm.Devices = append(vm.Devices, mounts...) - - // To start the VM, we need to call vfkit cfg, err := config.Default() if err != nil { return nil, nil, err } - - vfkitBinaryPath, err := cfg.FindHelperBinary(vfkitCommand, true) - if err != nil { - return nil, nil, err - } - - logrus.Debugf("vfkit path is: %s", vfkitBinaryPath) - - cmd, err := vm.Cmd(vfkitBinaryPath) - if err != nil { - return nil, nil, err - } - - vfkitEndpointArgs, err := getVfKitEndpointCMDArgs(mc.AppleHypervisor.Vfkit.Endpoint) - if err != nil { - return nil, nil, err - } - - machineDataDir, err := mc.DataDir() - if err != nil { - return nil, nil, err - } - - cmd.Args = append(cmd.Args, vfkitEndpointArgs...) - - firstBoot, err := mc.IsFirstBoot() - if err != nil { - return nil, nil, err - } - - if logrus.IsLevelEnabled(logrus.DebugLevel) { - debugDevArgs, err := getDebugDevicesCMDArgs() - if err != nil { - return nil, nil, err + rosetta := cfg.Machine.Rosetta + rosettaNew := rosetta + if runtime.GOARCH == "arm64" { + rosettaMC := mc.AppleHypervisor.Vfkit.Rosetta + if rosettaMC != rosettaNew { + mc.AppleHypervisor.Vfkit.Rosetta = rosettaNew } - cmd.Args = append(cmd.Args, debugDevArgs...) - cmd.Args = append(cmd.Args, "--gui") // add command line switch to pop the gui open } - - if firstBoot { - // If this is the first boot of the vm, we need to add the vsock - // device to vfkit so we can inject the ignition file - socketName := fmt.Sprintf("%s-%s", mc.Name, ignitionSocketName) - ignitionSocket, err = machineDataDir.AppendToNewVMFile(socketName, &socketName) - if err != nil { - return nil, nil, err - } - if err := ignitionSocket.Delete(); err != nil { - logrus.Errorf("unable to delete ignition socket: %q", err) - } - - ignitionVsockDeviceCLI, err := getIgnitionVsockDeviceAsCLI(ignitionSocket.GetPath()) - if err != nil { - return nil, nil, err - } - cmd.Args = append(cmd.Args, ignitionVsockDeviceCLI...) 
- - logrus.Debug("first boot detected") - logrus.Debugf("serving ignition file over %s", ignitionSocket.GetPath()) - go func() { - if err := serveIgnitionOverSock(ignitionSocket, mc); err != nil { - logrus.Error(err) - } - logrus.Debug("ignition vsock server exited") - }() - } - - logrus.Debugf("listening for ready on: %s", readySocket.GetPath()) - if err := readySocket.Delete(); err != nil { - logrus.Warnf("unable to delete previous ready socket: %q", err) - } - readyListen, err := net.Listen("unix", readySocket.GetPath()) - if err != nil { - return nil, nil, err - } - - logrus.Debug("waiting for ready notification") - readyChan := make(chan error) - go sockets.ListenAndWaitOnSocket(readyChan, readyListen) - - logrus.Debugf("vfkit command-line: %v", cmd.Args) - - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - returnFunc := func() error { - processErrChan := make(chan error) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go func() { - defer close(processErrChan) - for { - select { - case <-ctx.Done(): - return - default: - } - if err := checkProcessRunning("vfkit", cmd.Process.Pid); err != nil { - processErrChan <- err - return - } - // lets poll status every half second - time.Sleep(500 * time.Millisecond) - } - }() - - // wait for either socket or to be ready or process to have exited - select { - case err := <-processErrChan: - if err != nil { - return err - } - case err := <-readyChan: - if err != nil { - return err - } - logrus.Debug("ready notification received") - } - return nil - } - return cmd.Process.Release, returnFunc, nil + return apple.StartGenericAppleVM(mc, vfkitCommand, bl, mc.AppleHypervisor.Vfkit.Endpoint) } func (a AppleHVStubber) StopHostNetworking(_ *vmconfigs.MachineConfig, _ define.VMType) error { return nil } -func (a AppleHVStubber) VMType() define.VMType { - return define.AppleHvVirt +func (a AppleHVStubber) UpdateSSHPort(mc *vmconfigs.MachineConfig, port int) error { + // managed by gvproxy on this backend, so nothing to do + return nil } -func waitForGvProxy(gvproxySocket *define.VMFile) error { - backoffWait := gvProxyWaitBackoff - logrus.Debug("checking that gvproxy is running") - for i := 0; i < gvProxyMaxBackoffAttempts; i++ { - err := unix.Access(gvproxySocket.GetPath(), unix.W_OK) - if err == nil { - return nil - } - time.Sleep(backoffWait) - backoffWait *= 2 - } - return fmt.Errorf("unable to connect to gvproxy %q", gvproxySocket.GetPath()) +func (a AppleHVStubber) VMType() define.VMType { + return define.AppleHvVirt } func (a AppleHVStubber) PrepareIgnition(_ *vmconfigs.MachineConfig, _ *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error) { @@ -346,3 +155,8 @@ func (a AppleHVStubber) PostStartNetworking(mc *vmconfigs.MachineConfig, noInfo func (a AppleHVStubber) GetDisk(userInputPath string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { return diskpull.GetDisk(userInputPath, dirs, mc.ImagePath, a.VMType(), mc.Name) } + +func (a *AppleHVStubber) GetRosetta(mc *vmconfigs.MachineConfig) (bool, error) { + rosetta := mc.AppleHypervisor.Vfkit.Rosetta + return rosetta, nil +} diff --git a/pkg/machine/applehv/vfkit.go b/pkg/machine/applehv/vfkit.go deleted file mode 100644 index e476614890..0000000000 --- a/pkg/machine/applehv/vfkit.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build darwin - -package applehv - -import ( - "github.com/containers/podman/v5/pkg/machine/define" - "github.com/containers/podman/v5/pkg/machine/vmconfigs" - vfConfig "github.com/crc-org/vfkit/pkg/config" -) - -func 
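// The start-up flow removed above (now handled by apple.StartGenericAppleVM)
// races two events: a "ready" notification arriving over the vsock-backed
// ready socket, and the vfkit child process exiting early. A self-contained
// sketch of that select pattern; channel names and timings are illustrative.
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForReadyOrExit returns nil once the guest reports ready, or the first
// error observed from the process monitor if vfkit dies before that happens.
func waitForReadyOrExit(readyCh, procErrCh <-chan error) error {
	select {
	case err := <-procErrCh:
		if err == nil {
			err = errors.New("process exited before reporting ready")
		}
		return err
	case err := <-readyCh:
		return err // nil on a clean ready notification
	}
}

func main() {
	ready := make(chan error, 1)
	procErr := make(chan error, 1)

	// Simulate the guest signalling readiness after a short boot.
	go func() { time.Sleep(100 * time.Millisecond); ready <- nil }()
	// The process monitor would poll the child non-blocking in the real code.
	go func() { time.Sleep(5 * time.Second); procErr <- errors.New("vfkit exited") }()

	fmt.Println(waitForReadyOrExit(ready, procErr))
}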
getDefaultDevices(mc *vmconfigs.MachineConfig) ([]vfConfig.VirtioDevice, *define.VMFile, error) { - var devices []vfConfig.VirtioDevice - - disk, err := vfConfig.VirtioBlkNew(mc.ImagePath.GetPath()) - if err != nil { - return nil, nil, err - } - rng, err := vfConfig.VirtioRngNew() - if err != nil { - return nil, nil, err - } - - logfile, err := mc.LogFile() - if err != nil { - return nil, nil, err - } - serial, err := vfConfig.VirtioSerialNew(logfile.GetPath()) - if err != nil { - return nil, nil, err - } - - readySocket, err := mc.ReadySocket() - if err != nil { - return nil, nil, err - } - - readyDevice, err := vfConfig.VirtioVsockNew(1025, readySocket.GetPath(), true) - if err != nil { - return nil, nil, err - } - devices = append(devices, disk, rng, serial, readyDevice) - return devices, readySocket, nil -} - -func getDebugDevices() ([]vfConfig.VirtioDevice, error) { - var devices []vfConfig.VirtioDevice - gpu, err := vfConfig.VirtioGPUNew() - if err != nil { - return nil, err - } - mouse, err := vfConfig.VirtioInputNew(vfConfig.VirtioInputPointingDevice) - if err != nil { - return nil, err - } - kb, err := vfConfig.VirtioInputNew(vfConfig.VirtioInputKeyboardDevice) - if err != nil { - return nil, err - } - return append(devices, gpu, mouse, kb), nil -} - -func getIgnitionVsockDevice(path string) (vfConfig.VirtioDevice, error) { - return vfConfig.VirtioVsockNew(1024, path, true) -} - -func virtIOFsToVFKitVirtIODevice(mounts []*vmconfigs.Mount) ([]vfConfig.VirtioDevice, error) { - var virtioDevices []vfConfig.VirtioDevice - for _, vol := range mounts { - virtfsDevice, err := vfConfig.VirtioFsNew(vol.Source, vol.Tag) - if err != nil { - return nil, err - } - virtioDevices = append(virtioDevices, virtfsDevice) - } - return virtioDevices, nil -} diff --git a/pkg/machine/applehv/vfkit/config.go b/pkg/machine/applehv/vfkit/config.go deleted file mode 100644 index 816f63167b..0000000000 --- a/pkg/machine/applehv/vfkit/config.go +++ /dev/null @@ -1,126 +0,0 @@ -//go:build darwin - -package vfkit - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/containers/podman/v5/pkg/machine/define" - "github.com/crc-org/vfkit/pkg/config" - rest "github.com/crc-org/vfkit/pkg/rest/define" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - inspect = "/vm/inspect" - state = "/vm/state" - version = "/version" -) - -func (vf *VfkitHelper) get(endpoint string, payload io.Reader) (*http.Response, error) { - client := &http.Client{} - req, err := http.NewRequest(http.MethodGet, endpoint, payload) - if err != nil { - return nil, err - } - return client.Do(req) -} - -func (vf *VfkitHelper) post(endpoint string, payload io.Reader) (*http.Response, error) { - client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, endpoint, payload) - if err != nil { - return nil, err - } - return client.Do(req) -} - -// getRawState asks vfkit for virtual machine state unmodified (see state()) -func (vf *VfkitHelper) getRawState() (define.Status, error) { - var response rest.VMState - endPoint := vf.Endpoint + state - serverResponse, err := vf.get(endPoint, nil) - if err != nil { - if errors.Is(err, unix.ECONNREFUSED) { - logrus.Debugf("connection refused: %s", endPoint) - } - return "", err - } - err = json.NewDecoder(serverResponse.Body).Decode(&response) - if err != nil { - return "", err - } - if err := serverResponse.Body.Close(); err != nil { - logrus.Error(err) - } - return ToMachineStatus(response.State) -} - -// state asks vfkit for the 
virtual machine state. in case the vfkit -// service is not responding, we assume the service is not running -// and return a stopped status -func (vf *VfkitHelper) State() (define.Status, error) { - vmState, err := vf.getRawState() - if err == nil { - return vmState, nil - } - if errors.Is(err, unix.ECONNREFUSED) { - return define.Stopped, nil - } - return "", err -} - -func (vf *VfkitHelper) stateChange(newState rest.StateChange) error { - b, err := json.Marshal(rest.VMState{State: string(newState)}) - if err != nil { - return err - } - payload := bytes.NewReader(b) - _, err = vf.post(vf.Endpoint+state, payload) - return err -} - -func (vf *VfkitHelper) Stop(force, wait bool) error { - waitDuration := time.Millisecond * 10 - // TODO Add ability to wait until stopped - if force { - if err := vf.stateChange(rest.HardStop); err != nil { - return err - } - } else { - if err := vf.stateChange(rest.Stop); err != nil { - return err - } - } - if !wait { - return nil - } - waitErr := fmt.Errorf("failed waiting for vm to stop") - // Backoff to wait on the machine shutdown - for i := 0; i < 11; i++ { - _, err := vf.getRawState() - if err != nil || errors.Is(err, unix.ECONNREFUSED) { - waitErr = nil - break - } - waitDuration *= 2 - logrus.Debugf("backoff wait time: %s", waitDuration.String()) - time.Sleep(waitDuration) - } - return waitErr -} - -// VfkitHelper describes the use of vfkit: cmdline and endpoint -type VfkitHelper struct { - LogLevel logrus.Level - Endpoint string - VfkitBinaryPath *define.VMFile - VirtualMachine *config.VirtualMachine -} diff --git a/pkg/machine/cleanup.go b/pkg/machine/cleanup.go index ff23846cf7..97e45b2c6a 100644 --- a/pkg/machine/cleanup.go +++ b/pkg/machine/cleanup.go @@ -55,7 +55,7 @@ func (c *CleanupCallback) clean() { } } -func InitCleanup() CleanupCallback { +func CleanUp() CleanupCallback { return CleanupCallback{ Funcs: []func() error{}, } diff --git a/pkg/machine/compression/compression_test.go b/pkg/machine/compression/compression_test.go index afffce1e5b..9c51cd8bca 100644 --- a/pkg/machine/compression/compression_test.go +++ b/pkg/machine/compression/compression_test.go @@ -1,6 +1,13 @@ package compression -import "testing" +import ( + "os" + "testing" + + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) func Test_compressionFromFile(t *testing.T) { type args struct { @@ -33,11 +40,11 @@ func Test_compressionFromFile(t *testing.T) { want: Bz2, }, { - name: "default is xz", + name: "default is zstd", args: args{ path: "/tmp/foo", }, - want: Xz, + want: Zstd, }, } for _, tt := range tests { @@ -76,9 +83,9 @@ func TestImageCompression_String(t *testing.T) { want: "zip", }, { - name: "xz is default", + name: "zstd is default", c: 99, - want: "xz", + want: "zst", }, } for _, tt := range tests { @@ -89,3 +96,43 @@ func TestImageCompression_String(t *testing.T) { }) } } + +func Test_Decompress(t *testing.T) { + type args struct { + src string + dst string + } + + type want string + + tests := []struct { + name string + args args + want want + }{ + {name: "zip", args: args{src: "./testdata/sample.zip", dst: "./testdata/hellozip"}, want: "zip\n"}, + {name: "zip with trailing zeros", args: args{src: "./testdata/sample-withzeros.zip", dst: "./testdata/hellozip-withzeros"}, want: "zip\n\x00\x00\x00\x00\x00\x00"}, + {name: "xz", args: args{src: "./testdata/sample.xz", dst: "./testdata/helloxz"}, want: "xz\n"}, + {name: "xz with trailing zeros", args: args{src: 
"./testdata/sample-withzeros.xz", dst: "./testdata/helloxz-withzeros"}, want: "xz\n\x00\x00\x00\x00\x00\x00\x00"}, + {name: "gzip", args: args{src: "./testdata/sample.gz", dst: "./testdata/hellogz"}, want: "gzip\n"}, + {name: "gzip with trailing zeros", args: args{src: "./testdata/sample-withzeros.gz", dst: "./testdata/hellogzip-withzeros"}, want: "gzip\n\x00\x00\x00\x00\x00"}, + {name: "bzip2", args: args{src: "./testdata/sample.bz2", dst: "./testdata/hellobz2"}, want: "bzip2\n"}, + {name: "bzip2 with trailing zeros", args: args{src: "./testdata/sample-withzeros.bz2", dst: "./testdata/hellobz2-withzeros"}, want: "bzip2\n\x00\x00\x00\x00"}, + {name: "zstd", args: args{src: "./testdata/sample.zst", dst: "./testdata/hellozstd"}, want: "zstd\n"}, + {name: "zstd with trailing zeros", args: args{src: "./testdata/sample-withzeros.zst", dst: "./testdata/hellozstd-withzeros"}, want: "zstd\n\x00\x00\x00\x00\x00"}, + {name: "uncompressed", args: args{src: "./testdata/sample.uncompressed", dst: "./testdata/hellouncompressed"}, want: "uncompressed\n"}, + {name: "uncompressed with trailing zeros", args: args{src: "./testdata/sample-withzeros.uncompressed", dst: "./testdata/hellozuncompressed-withzeros"}, want: "uncompressed\n\x00\x00\x00\x00\x00\x00\x00"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srcVMFile := &define.VMFile{Path: tt.args.src} + dstFilePath := tt.args.dst + defer os.Remove(dstFilePath) + err := Decompress(srcVMFile, dstFilePath) + require.NoError(t, err) + data, err := os.ReadFile(dstFilePath) + require.NoError(t, err) + assert.Equal(t, string(tt.want), string(data)) + }) + } +} diff --git a/pkg/machine/compression/config.go b/pkg/machine/compression/config.go index 0201f75331..db2ec0d002 100644 --- a/pkg/machine/compression/config.go +++ b/pkg/machine/compression/config.go @@ -9,6 +9,7 @@ const ( Zip Gz Bz2 + Zstd ) func KindFromFile(path string) ImageCompression { @@ -19,8 +20,10 @@ func KindFromFile(path string) ImageCompression { return Gz case strings.HasSuffix(path, Zip.String()): return Zip + case strings.HasSuffix(path, Xz.String()): + return Xz } - return Xz + return Zstd } func (c ImageCompression) String() string { @@ -31,6 +34,8 @@ func (c ImageCompression) String() string { return "zip" case Bz2: return "bz2" + case Xz: + return "xz" } - return "xz" + return "zst" } diff --git a/pkg/machine/compression/decompress.go b/pkg/machine/compression/decompress.go index 5ab0221b3a..c13335307b 100644 --- a/pkg/machine/compression/decompress.go +++ b/pkg/machine/compression/decompress.go @@ -1,279 +1,103 @@ package compression import ( - "archive/zip" - "bufio" - "compress/gzip" - "errors" - "fmt" "io" "os" - "os/exec" "path/filepath" - "runtime" "strings" - "github.com/containers/image/v5/pkg/compression" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/utils" "github.com/containers/storage/pkg/archive" - crcOs "github.com/crc-org/crc/v2/pkg/os" "github.com/sirupsen/logrus" - "github.com/ulikunitz/xz" ) -// Decompress is a generic wrapper for various decompression algos -// TODO this needs some love. in the various decompression functions that are -// called, the same uncompressed path is being opened multiple times. 
-func Decompress(localPath *define.VMFile, uncompressedPath string) error { - var isZip bool - uncompressedFileWriter, err := os.OpenFile(uncompressedPath, os.O_CREATE|os.O_RDWR, 0600) - if err != nil { - return err - } - defer func() { - if err := uncompressedFileWriter.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - logrus.Warnf("unable to close decompressed file %s: %q", uncompressedPath, err) - } - }() - sourceFile, err := localPath.Read() - if err != nil { - return err - } - if strings.HasSuffix(localPath.GetPath(), ".zip") { - isZip = true - } - compressionType := archive.DetectCompression(sourceFile) - - prefix := "Extracting compressed file" - prefix += ": " + filepath.Base(uncompressedPath) - switch compressionType { - case archive.Xz: - return decompressXZ(prefix, localPath.GetPath(), uncompressedFileWriter) - case archive.Uncompressed: - if isZip && runtime.GOOS == "windows" { - return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter) - } - // here we should just do a copy - dstFile, err := os.Open(localPath.GetPath()) - if err != nil { - return err - } - fmt.Printf("Copying uncompressed file %q to %q/n", localPath.GetPath(), dstFile.Name()) - _, err = crcOs.CopySparse(uncompressedFileWriter, dstFile) - return err - case archive.Gzip: - if runtime.GOOS == "darwin" { - return decompressGzWithSparse(prefix, localPath, uncompressedPath) - } - fallthrough - default: - return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter) - } - - // if compressionType != archive.Uncompressed || isZip { - // prefix = "Extracting compressed file" - // } - // prefix += ": " + filepath.Base(uncompressedPath) - // if compressionType == archive.Xz { - // return decompressXZ(prefix, localPath.GetPath(), uncompressedFileWriter) - // } - // if isZip && runtime.GOOS == "windows" { - // return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter) - // } +const ( + decompressedFileFlag = os.O_CREATE | os.O_TRUNC | os.O_WRONLY + macOs = "darwin" + progressBarPrefix = "Extracting compressed file" + zipExt = ".zip" + magicNumberMaxBytes = 10 +) - // Unfortunately GZ is not sparse capable. Lets handle it differently - // if compressionType == archive.Gzip && runtime.GOOS == "darwin" { - // return decompressGzWithSparse(prefix, localPath, uncompressedPath) - // } - // return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter) +type decompressor interface { + compressedFileSize() int64 + compressedFileMode() os.FileMode + compressedFileReader() (io.ReadCloser, error) + decompress(w io.WriteSeeker, r io.Reader) error + close() } -// Will error out if file without .Xz already exists -// Maybe extracting then renaming is a good idea here.. 
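// The exported entry point keeps the same shape after the refactor: callers
// hand in a define.VMFile for the compressed image and a destination path,
// and the right decompressor is picked from the magic number (or the .zip
// suffix). A usage sketch with illustrative paths.
package main

import (
	"fmt"

	"github.com/containers/podman/v5/pkg/machine/compression"
	"github.com/containers/podman/v5/pkg/machine/define"
)

func main() {
	src := &define.VMFile{Path: "/tmp/podman-machine.raw.zst"} // illustrative path
	if err := compression.Decompress(src, "/tmp/podman-machine.raw"); err != nil {
		fmt.Println("decompress failed:", err)
	}
}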
-// depends on Xz: not pre-installed on mac, so it becomes a brew dependency -func decompressXZ(prefix string, src string, output io.WriteCloser) error { - var read io.Reader - var cmd *exec.Cmd - - stat, err := os.Stat(src) - if err != nil { - return err - } - file, err := os.Open(src) +func Decompress(compressedVMFile *define.VMFile, decompressedFilePath string) error { + compressedFilePath := compressedVMFile.GetPath() + compressedFileMagicNum, err := compressedVMFile.ReadMagicNumber(magicNumberMaxBytes) if err != nil { return err } - defer file.Close() - - p, bar := utils.ProgressBar(prefix, stat.Size(), prefix+": done") - proxyReader := bar.ProxyReader(file) - defer func() { - if err := proxyReader.Close(); err != nil { - logrus.Error(err) - } - }() - // Prefer Xz utils for fastest performance, fallback to go xi2 impl - if _, err := exec.LookPath("xz"); err == nil { - cmd = exec.Command("xz", "-d", "-c") - cmd.Stdin = proxyReader - read, err = cmd.StdoutPipe() - if err != nil { - return err - } - cmd.Stderr = os.Stderr - } else { - // This XZ implementation is reliant on buffering. It is also 3x+ slower than XZ utils. - // Consider replacing with a faster implementation (e.g. xi2) if podman machine is - // updated with a larger image for the distribution base. - buf := bufio.NewReader(proxyReader) - read, err = xz.NewReader(buf) - if err != nil { - return err - } + var d decompressor + if d, err = newDecompressor(compressedFilePath, compressedFileMagicNum); err != nil { + return err } - done := make(chan bool) - go func() { - if _, err := io.Copy(output, read); err != nil { - logrus.Error(err) - } - output.Close() - done <- true - }() + return runDecompression(d, decompressedFilePath) +} - if cmd != nil { - err := cmd.Start() - if err != nil { - return err - } - p.Wait() - return cmd.Wait() +func newDecompressor(compressedFilePath string, compressedFileMagicNum []byte) (decompressor, error) { + compressionType := archive.DetectCompression(compressedFileMagicNum) + hasZipSuffix := strings.HasSuffix(compressedFilePath, zipExt) + + switch { + // Zip files are not guaranteed to have a magic number at the beginning + // of the file, so we need to use the file name to detect them. 
+ case compressionType == archive.Uncompressed && hasZipSuffix: + return newZipDecompressor(compressedFilePath) + case compressionType == archive.Uncompressed: + return newUncompressedDecompressor(compressedFilePath) + default: + return newGenericDecompressor(compressedFilePath) } - <-done - p.Wait() - return nil } -func decompressEverythingElse(prefix string, src string, output io.WriteCloser) error { - stat, err := os.Stat(src) - if err != nil { - return err - } - f, err := os.Open(src) +func runDecompression(d decompressor, decompressedFilePath string) (retErr error) { + compressedFileReader, err := d.compressedFileReader() if err != nil { return err } - p, bar := utils.ProgressBar(prefix, stat.Size(), prefix+": done") - proxyReader := bar.ProxyReader(f) - defer func() { - if err := proxyReader.Close(); err != nil { - logrus.Error(err) - } - }() - uncompressStream, _, err := compression.AutoDecompress(proxyReader) - if err != nil { - return err - } - defer func() { - if err := uncompressStream.Close(); err != nil { - logrus.Error(err) - } - if err := output.Close(); err != nil { - logrus.Error(err) - } - }() + defer d.close() - _, err = io.Copy(output, uncompressStream) - p.Wait() - return err -} + initMsg := progressBarPrefix + ": " + filepath.Base(decompressedFilePath) + finalMsg := initMsg + ": done" -func decompressZip(prefix string, src string, output io.WriteCloser) error { - zipReader, err := zip.OpenReader(src) - if err != nil { - return err - } - if len(zipReader.File) != 1 { - return errors.New("machine image files should consist of a single compressed file") - } - f, err := zipReader.File[0].Open() - if err != nil { - return err - } - defer func() { - if err := f.Close(); err != nil { - logrus.Error(err) - } - }() - defer func() { - if err := output.Close(); err != nil { - logrus.Error(err) - } - }() - size := int64(zipReader.File[0].CompressedSize64) - p, bar := utils.ProgressBar(prefix, size, prefix+": done") - proxyReader := bar.ProxyReader(f) - defer func() { - if err := proxyReader.Close(); err != nil { - logrus.Error(err) - } - }() - _, err = io.Copy(output, proxyReader) - p.Wait() - return err -} + p, bar := utils.ProgressBar(initMsg, d.compressedFileSize(), finalMsg) + // Wait for bars to complete and then shut down the bars container + defer p.Wait() -func decompressGzWithSparse(prefix string, compressedPath *define.VMFile, uncompressedPath string) error { - stat, err := os.Stat(compressedPath.GetPath()) - if err != nil { - return err - } + compressedFileReaderProxy := bar.ProxyReader(compressedFileReader) + // Interrupts the bar goroutine. It's important that + // bar.Abort(false) is called before p.Wait(), otherwise + // can hang. 
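// A stand-alone sketch of the classification done by newDecompressor above:
// sniff the first few bytes with containers/storage's DetectCompression and
// fall back to the ".zip" suffix, which DetectCompression reports as
// Uncompressed. The file path and returned labels are illustrative.
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/containers/storage/pkg/archive"
)

func classify(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// A handful of bytes is enough for the signatures DetectCompression checks.
	magic := make([]byte, 10)
	if _, err := io.ReadFull(f, magic); err != nil &&
		err != io.ErrUnexpectedEOF && err != io.EOF {
		return "", err
	}

	kind := archive.DetectCompression(magic)
	switch {
	case kind == archive.Uncompressed && strings.HasSuffix(path, ".zip"):
		return "zip", nil
	case kind == archive.Bzip2:
		return "bzip2", nil
	case kind == archive.Gzip:
		return "gzip", nil
	case kind == archive.Xz:
		return "xz", nil
	case kind == archive.Zstd:
		return "zstd", nil
	default:
		return "uncompressed", nil
	}
}

func main() {
	fmt.Println(classify("./testdata/sample.zst")) // illustrative input
}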
+ defer bar.Abort(false) - dstFile, err := os.OpenFile(uncompressedPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, stat.Mode()) - if err != nil { - return err - } - defer func() { - if err := dstFile.Close(); err != nil { - logrus.Errorf("unable to close uncompressed file %s: %q", uncompressedPath, err) - } - }() + var decompressedFileWriter *os.File - f, err := os.Open(compressedPath.GetPath()) - if err != nil { + if decompressedFileWriter, err = os.OpenFile(decompressedFilePath, decompressedFileFlag, d.compressedFileMode()); err != nil { + logrus.Errorf("Unable to open destination file %s for writing: %q", decompressedFilePath, err) return err } defer func() { - if err := f.Close(); err != nil { - logrus.Errorf("unable to close on compressed file %s: %q", compressedPath.GetPath(), err) + if err := decompressedFileWriter.Close(); err != nil { + logrus.Warnf("Unable to to close destination file %s: %q", decompressedFilePath, err) + if retErr == nil { + retErr = err + } } }() - gzReader, err := gzip.NewReader(f) - if err != nil { + if err = d.decompress(decompressedFileWriter, compressedFileReaderProxy); err != nil { + logrus.Errorf("Error extracting compressed file: %q", err) return err } - defer func() { - if err := gzReader.Close(); err != nil { - logrus.Errorf("unable to close gzreader: %q", err) - } - }() - // TODO remove the following line when progress bars work - _ = prefix - // p, bar := utils.ProgressBar(prefix, stat.Size(), prefix+": done") - // proxyReader := bar.ProxyReader(f) - // defer func() { - // if err := proxyReader.Close(); err != nil { - // logrus.Error(err) - // } - // }() - - logrus.Debugf("decompressing %s", compressedPath.GetPath()) - _, err = crcOs.CopySparse(dstFile, gzReader) - logrus.Debug("decompression complete") - // p.Wait() - return err + return nil } diff --git a/pkg/machine/compression/generic.go b/pkg/machine/compression/generic.go new file mode 100644 index 0000000000..0b70ff92e9 --- /dev/null +++ b/pkg/machine/compression/generic.go @@ -0,0 +1,87 @@ +package compression + +import ( + "io" + "io/fs" + "os" + "runtime" + + "github.com/containers/image/v5/pkg/compression" + "github.com/sirupsen/logrus" +) + +type genericDecompressor struct { + compressedFilePath string + compressedFile *os.File + compressedFileInfo os.FileInfo +} + +func newGenericDecompressor(compressedFilePath string) (*genericDecompressor, error) { + d := &genericDecompressor{} + d.compressedFilePath = compressedFilePath + stat, err := os.Stat(d.compressedFilePath) + if err != nil { + return nil, err + } + d.compressedFileInfo = stat + return d, nil +} + +func (d *genericDecompressor) compressedFileSize() int64 { + return d.compressedFileInfo.Size() +} + +func (d *genericDecompressor) compressedFileMode() fs.FileMode { + return d.compressedFileInfo.Mode() +} + +func (d *genericDecompressor) compressedFileReader() (io.ReadCloser, error) { + compressedFile, err := os.Open(d.compressedFilePath) + if err != nil { + return nil, err + } + d.compressedFile = compressedFile + return compressedFile, nil +} + +func (d *genericDecompressor) decompress(w io.WriteSeeker, r io.Reader) error { + decompressedFileReader, _, err := compression.AutoDecompress(r) + if err != nil { + return err + } + defer func() { + if err := decompressedFileReader.Close(); err != nil { + logrus.Errorf("Unable to close gz file: %q", err) + } + }() + + // Use sparse-optimized copy for macOS as applehv, + // macOS native hypervisor, uses large raw VM disk + // files mostly empty (~2GB of content ~8GB empty). 
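// The genericDecompressor.decompress method above leans on containers/image
// to sniff and unwrap gzip/bzip2/xz/zstd streams in one call. A stand-alone
// sketch of that call, with illustrative file names and without the
// macOS-only sparse handling shown below.
package main

import (
	"io"
	"os"

	imgcompression "github.com/containers/image/v5/pkg/compression"
)

func main() {
	in, err := os.Open("/tmp/disk.raw.zst") // any gzip/bzip2/xz/zstd stream
	if err != nil {
		panic(err)
	}
	defer in.Close()

	// AutoDecompress detects the format from the stream itself; the boolean
	// reports whether any decompression was actually applied.
	rc, wasCompressed, err := imgcompression.AutoDecompress(in)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_ = wasCompressed

	out, err := os.Create("/tmp/disk.raw")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}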
+ if runtime.GOOS == macOs { + err = d.sparseOptimizedCopy(w, decompressedFileReader) + } else { + _, err = io.Copy(w, decompressedFileReader) + } + + return err +} + +func (d *genericDecompressor) close() { + if err := d.compressedFile.Close(); err != nil { + logrus.Errorf("Unable to close compressed file: %q", err) + } +} + +func (d *genericDecompressor) sparseOptimizedCopy(w io.WriteSeeker, r io.Reader) error { + var err error + sparseWriter := NewSparseWriter(w) + defer func() { + e := sparseWriter.Close() + if e != nil && err == nil { + err = e + } + }() + _, err = io.Copy(sparseWriter, r) + return err +} diff --git a/pkg/machine/compression/sparse_file_writer.go b/pkg/machine/compression/sparse_file_writer.go new file mode 100644 index 0000000000..d97ac9f6d0 --- /dev/null +++ b/pkg/machine/compression/sparse_file_writer.go @@ -0,0 +1,139 @@ +package compression + +import ( + "errors" + "fmt" + "io" +) + +const zerosThreshold = 1024 + +type WriteSeekCloser interface { + io.Closer + io.WriteSeeker +} + +type sparseWriter struct { + file io.WriteSeeker + // Invariant between method calls: + // The contents of the file match the contents passed to Write, except that pendingZeroes trailing zeroes have not been written. + // Also, the data that _has_ been written does not end with a zero byte (i.e. pendingZeroes is the largest possible value. + pendingZeroes int64 +} + +// NewSparseWriter returns a WriteCloser for underlying file which creates +// holes where appropriate. +// NOTE: The caller must .Close() both the returned sparseWriter AND the underlying file, +// in that order. +func NewSparseWriter(file io.WriteSeeker) *sparseWriter { + return &sparseWriter{ + file: file, + pendingZeroes: 0, + } +} + +func (sw *sparseWriter) createHole(size int64) error { + _, err := sw.file.Seek(size, io.SeekCurrent) + return err +} + +func zeroSpanEnd(b []byte, i int) int { + for i < len(b) && b[i] == 0 { + i++ + } + return i +} + +func nonzeroSpanEnd(b []byte, i int) int { + for i < len(b) && b[i] != 0 { + i++ + } + return i +} + +// Write writes data to the file, creating holes for long sequences of zeros. +func (sw *sparseWriter) Write(data []byte) (int, error) { + initialZeroSpanLength := zeroSpanEnd(data, 0) + if initialZeroSpanLength == len(data) { + sw.pendingZeroes += int64(initialZeroSpanLength) + return initialZeroSpanLength, nil + } + + // We have _some_ non-zero data to write. + // Think of the input as an alternating sequence of spans of zeroes / non-zeroes 0a0b…c0, + // where the starting/ending span of zeroes may be empty. + + pendingWriteOffset := 0 + // The expected condition for creating a hole would be sw.pendingZeroes + initialZeroSpanLength >= zerosThreshold; but + // if sw.pendingZeroes != 0, we are going to spend a syscall to deal with sw.pendingZeroes either way. + // We might just as well make it a createHole(), even if the hole size is below zeroThreshold. + if sw.pendingZeroes != 0 || initialZeroSpanLength >= zerosThreshold { + if err := sw.createHole(sw.pendingZeroes + int64(initialZeroSpanLength)); err != nil { + return -1, err + } + // We could set sw.pendingZeroes = 0 now; it would always be overwritten on successful return from this function. 
+ pendingWriteOffset = initialZeroSpanLength + } + + current := initialZeroSpanLength + for { + // Invariant at this point of this loop: + // - pendingWriteOffset <= current < len(data) + // - data[current] != 0 + // - data[pendingWriteOffset:current] has not yet been written + if pendingWriteOffset > current || current >= len(data) { + return -1, fmt.Errorf("internal error: sparseWriter invariant violation: %d <= %d < %d", pendingWriteOffset, current, len(data)) + } + if b := data[current]; b == 0 { + return -1, fmt.Errorf("internal error: sparseWriter invariant violation: %d@%d", b, current) + } + + nonzeroSpanEnd := nonzeroSpanEnd(data, current) + if nonzeroSpanEnd == current { + return -1, fmt.Errorf("internal error: sparseWriter’s nonzeroSpanEnd didn’t advance") + } + zeroSpanEnd := zeroSpanEnd(data, nonzeroSpanEnd) // possibly == nonzeroSpanEnd + zeroSpanLength := zeroSpanEnd - nonzeroSpanEnd + if zeroSpanEnd < len(data) && zeroSpanLength < zerosThreshold { + // Too small a hole, keep going + current = zeroSpanEnd + continue + } + + // We have either reached the end, or found an interesting hole. Issue a write. + if _, err := sw.file.Write(data[pendingWriteOffset:nonzeroSpanEnd]); err != nil { + return -1, err + } + if zeroSpanEnd == len(data) { + sw.pendingZeroes = int64(zeroSpanLength) + return zeroSpanEnd, nil + } + + if err := sw.createHole(int64(zeroSpanLength)); err != nil { + return -1, err + } + pendingWriteOffset = zeroSpanEnd + current = zeroSpanEnd + } +} + +// Close closes the SparseWriter's underlying file. +func (sw *sparseWriter) Close() error { + if sw.file == nil { + return errors.New("file is already closed") + } + if sw.pendingZeroes != 0 { + if holeSize := sw.pendingZeroes - 1; holeSize >= zerosThreshold { + if err := sw.createHole(holeSize); err != nil { + return err + } + sw.pendingZeroes -= holeSize + } + var zeroArray [zerosThreshold]byte + if _, err := sw.file.Write(zeroArray[:sw.pendingZeroes]); err != nil { + return err + } + } + sw.file = nil + return nil +} diff --git a/pkg/machine/compression/sparse_file_writer_test.go b/pkg/machine/compression/sparse_file_writer_test.go new file mode 100644 index 0000000000..d4a06625b4 --- /dev/null +++ b/pkg/machine/compression/sparse_file_writer_test.go @@ -0,0 +1,123 @@ +package compression + +import ( + "bytes" + "errors" + "fmt" + "io" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +type memorySparseFile struct { + buffer bytes.Buffer + pos int64 + sparse int64 +} + +func (m *memorySparseFile) Seek(offset int64, whence int) (int64, error) { + logrus.Debugf("Seek %d %d", offset, whence) + var newPos int64 + switch whence { + case io.SeekStart: + panic("unexpected") + case io.SeekCurrent: + newPos = m.pos + offset + if offset < -1 { + panic("unexpected") + } + m.sparse += offset + case io.SeekEnd: + newPos = int64(m.buffer.Len()) + offset + default: + return 0, errors.New("unsupported seek whence") + } + + if newPos < 0 { + return 0, errors.New("negative position is not allowed") + } + + m.pos = newPos + return newPos, nil +} + +func (m *memorySparseFile) Write(b []byte) (n int, err error) { + logrus.Debugf("Write %d", len(b)) + if int64(m.buffer.Len()) < m.pos { + padding := make([]byte, m.pos-int64(m.buffer.Len())) + _, err := m.buffer.Write(padding) + if err != nil { + return 0, err + } + } + + n, err = m.buffer.Write(b) + m.pos += int64(n) + return n, err +} + +func testInputWithWriteLen(t *testing.T, input []byte, minSparse int64, chunkSize int) { + m := 
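// End-to-end usage sketch for the sparse writer added above: copy a buffer
// that is mostly zeros into a file, let Write turn the long zero runs into
// Seek-created holes, and close the writer before the underlying file. The
// output path is illustrative; actual on-disk savings depend on the
// filesystem supporting sparse files.
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/containers/podman/v5/pkg/machine/compression"
)

func main() {
	// 4 MiB of zeros with a few real bytes at each end.
	data := make([]byte, 4*1024*1024)
	copy(data, "header")
	copy(data[len(data)-6:], "footer")

	f, err := os.Create("/tmp/sparse-demo.raw")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sw := compression.NewSparseWriter(f)
	if _, err := io.Copy(sw, bytes.NewReader(data)); err != nil {
		panic(err)
	}
	// Per the NewSparseWriter contract: close the sparse writer first,
	// then the underlying file.
	if err := sw.Close(); err != nil {
		panic(err)
	}

	info, err := f.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println("logical size:", info.Size()) // full 4 MiB, holes included
}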
&memorySparseFile{} + sparseWriter := NewSparseWriter(m) + + for i := 0; i < len(input); i += chunkSize { + end := i + chunkSize + if end > len(input) { + end = len(input) + } + _, err := sparseWriter.Write(input[i:end]) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + } + err := sparseWriter.Close() + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + assert.Equal(t, string(input), m.buffer.String()) + assert.GreaterOrEqual(t, m.sparse, minSparse) +} + +func testInput(t *testing.T, name string, inputBytes []byte, minSparse int64) { + currentLen := 1 + for { + t.Run(fmt.Sprintf("%s@%d", name, currentLen), func(t *testing.T) { + testInputWithWriteLen(t, inputBytes, minSparse, currentLen) + }) + currentLen <<= 1 + if currentLen > len(inputBytes) { + break + } + } +} + +func TestSparseWriter(t *testing.T) { + testInput(t, "small contents", []byte("hello"), 0) + testInput(t, "small zeroes", append(make([]byte, 100), []byte("hello")...), 0) + testInput(t, "empty", []byte(""), 0) + testInput(t, "small iterated", []byte{'a', 0, 'a', 0, 'a', 0}, 0) + testInput(t, "small iterated2", []byte{0, 'a', 0, 'a', 0, 'a'}, 0) + + // add "hello" at the beginning + const largeSize = 1024 * 1024 + largeInput := make([]byte, largeSize) + copy(largeInput, []byte("hello")) + testInput(t, "sparse end", largeInput, largeSize-5-1) // -1 for the final byte establishing file size + + // add "hello" at the end + largeInput = make([]byte, largeSize) + copy(largeInput[largeSize-5:], []byte("hello")) + testInput(t, "sparse beginning", largeInput, largeSize-5) + + // add "hello" in the middle + largeInput = make([]byte, largeSize) + copy(largeInput[len(largeInput)/2:], []byte("hello")) + testInput(t, "sparse both ends", largeInput, largeSize-5-1) // -1 for the final byte establishing file size + + largeInput = make([]byte, largeSize) + copy(largeInput[0:5], []byte("hello")) + copy(largeInput[largeSize-5:], []byte("HELLO")) + testInput(t, "sparse middle", largeInput, largeSize-10) +} diff --git a/pkg/machine/compression/testdata/gen-testdata.sh b/pkg/machine/compression/testdata/gen-testdata.sh new file mode 100755 index 0000000000..375b039e19 --- /dev/null +++ b/pkg/machine/compression/testdata/gen-testdata.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +echo "zstd" > hellozstd-withzeros && \ +truncate -c -s 10 hellozstd-withzeros && \ +zstd -f --sparse hellozstd-withzeros -o sample-withzeros.zst && \ +rm hellozstd-withzeros + +echo "zstd" > hellozstd && \ +zstd -f --sparse hellozstd -o sample.zst && \ +rm hellozstd + +echo "gzip" > hellogzip-withzeros && \ +truncate -c -s 10 hellogzip-withzeros && \ +gzip -f -c hellogzip-withzeros > sample-withzeros.gz && \ +rm hellogzip-withzeros + +echo "gzip" > hellogzip && \ +gzip -f -c hellogzip > sample.gz && \ +rm hellogzip + +echo "bzip2" > hellobzip2-withzeros && \ +truncate -c -s 10 hellobzip2-withzeros && \ +bzip2 -f -c hellobzip2-withzeros > sample-withzeros.bz2 && \ +rm hellobzip2-withzeros + +echo "bzip2" > hellobzip2 && \ +bzip2 -f -c hellobzip2 > sample.bz2 && \ +rm hellobzip2 + +echo "uncompressed" > sample-withzeros.uncompressed && \ +truncate -c -s 20 sample-withzeros.uncompressed + +echo "uncompressed" > sample.uncompressed + +echo "xz" > helloxz-withzeros && \ +truncate -c -s 10 helloxz-withzeros && \ +xz -f -z -c helloxz-withzeros > sample-withzeros.xz && \ +rm helloxz-withzeros + +echo "xz" > helloxz && \ +xz -f -z -c helloxz > sample.xz && \ +rm helloxz + +echo "zip" > hellozip-withzeros && \ +truncate -c -s 10 hellozip-withzeros && \ +zip 
sample-withzeros.zip hellozip-withzeros && \ +rm hellozip-withzeros + +echo "zip" > hellozip && \ +zip sample.zip hellozip && \ +rm hellozip diff --git a/pkg/machine/compression/testdata/sample-withzeros.bz2 b/pkg/machine/compression/testdata/sample-withzeros.bz2 new file mode 100644 index 0000000000..782a681c6c Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.bz2 differ diff --git a/pkg/machine/compression/testdata/sample-withzeros.gz b/pkg/machine/compression/testdata/sample-withzeros.gz new file mode 100644 index 0000000000..e8ffc40b39 Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.gz differ diff --git a/pkg/machine/compression/testdata/sample-withzeros.uncompressed b/pkg/machine/compression/testdata/sample-withzeros.uncompressed new file mode 100644 index 0000000000..55c4a8b50d Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.uncompressed differ diff --git a/pkg/machine/compression/testdata/sample-withzeros.xz b/pkg/machine/compression/testdata/sample-withzeros.xz new file mode 100644 index 0000000000..56f506a1d2 Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.xz differ diff --git a/pkg/machine/compression/testdata/sample-withzeros.zip b/pkg/machine/compression/testdata/sample-withzeros.zip new file mode 100644 index 0000000000..778730a2f6 Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.zip differ diff --git a/pkg/machine/compression/testdata/sample-withzeros.zst b/pkg/machine/compression/testdata/sample-withzeros.zst new file mode 100644 index 0000000000..6fac8c6631 Binary files /dev/null and b/pkg/machine/compression/testdata/sample-withzeros.zst differ diff --git a/pkg/machine/compression/testdata/sample.bz2 b/pkg/machine/compression/testdata/sample.bz2 new file mode 100644 index 0000000000..499e27f318 Binary files /dev/null and b/pkg/machine/compression/testdata/sample.bz2 differ diff --git a/pkg/machine/compression/testdata/sample.gz b/pkg/machine/compression/testdata/sample.gz new file mode 100644 index 0000000000..8f9a5e0466 Binary files /dev/null and b/pkg/machine/compression/testdata/sample.gz differ diff --git a/pkg/machine/compression/testdata/sample.uncompressed b/pkg/machine/compression/testdata/sample.uncompressed new file mode 100644 index 0000000000..edfd10ef0d --- /dev/null +++ b/pkg/machine/compression/testdata/sample.uncompressed @@ -0,0 +1 @@ +uncompressed diff --git a/pkg/machine/compression/testdata/sample.xz b/pkg/machine/compression/testdata/sample.xz new file mode 100644 index 0000000000..d791191d35 Binary files /dev/null and b/pkg/machine/compression/testdata/sample.xz differ diff --git a/pkg/machine/compression/testdata/sample.zip b/pkg/machine/compression/testdata/sample.zip new file mode 100644 index 0000000000..a03bcd4bb0 Binary files /dev/null and b/pkg/machine/compression/testdata/sample.zip differ diff --git a/pkg/machine/compression/testdata/sample.zst b/pkg/machine/compression/testdata/sample.zst new file mode 100644 index 0000000000..8fbd12afb0 Binary files /dev/null and b/pkg/machine/compression/testdata/sample.zst differ diff --git a/pkg/machine/compression/uncompressed.go b/pkg/machine/compression/uncompressed.go new file mode 100644 index 0000000000..251a3a9ccc --- /dev/null +++ b/pkg/machine/compression/uncompressed.go @@ -0,0 +1,18 @@ +package compression + +import ( + "io" +) + +type uncompressedDecompressor struct { + genericDecompressor +} + +func 
newUncompressedDecompressor(compressedFilePath string) (*uncompressedDecompressor, error) { + d, err := newGenericDecompressor(compressedFilePath) + return &uncompressedDecompressor{*d}, err +} + +func (d *uncompressedDecompressor) decompress(w io.WriteSeeker, r io.Reader) error { + return d.sparseOptimizedCopy(w, r) +} diff --git a/pkg/machine/compression/zip.go b/pkg/machine/compression/zip.go new file mode 100644 index 0000000000..7de1f4813c --- /dev/null +++ b/pkg/machine/compression/zip.go @@ -0,0 +1,55 @@ +package compression + +import ( + "archive/zip" + "errors" + "io" + + "github.com/sirupsen/logrus" +) + +type zipDecompressor struct { + genericDecompressor + zipReader *zip.ReadCloser + fileReader io.ReadCloser +} + +func newZipDecompressor(compressedFilePath string) (*zipDecompressor, error) { + d, err := newGenericDecompressor(compressedFilePath) + return &zipDecompressor{*d, nil, nil}, err +} + +// This is the only compressor that doesn't return the compressed file +// stream (zip.OpenReader() provides access to the decompressed file). +// As a result the progress bar shows the decompressed file stream +// but the final size is the compressed file size. +func (d *zipDecompressor) compressedFileReader() (io.ReadCloser, error) { + zipReader, err := zip.OpenReader(d.compressedFilePath) + if err != nil { + return nil, err + } + d.zipReader = zipReader + if len(zipReader.File) != 1 { + return nil, errors.New("machine image files should consist of a single compressed file") + } + z, err := zipReader.File[0].Open() + if err != nil { + return nil, err + } + d.fileReader = z + return z, nil +} + +func (*zipDecompressor) decompress(w io.WriteSeeker, r io.Reader) error { + _, err := io.Copy(w, r) + return err +} + +func (d *zipDecompressor) close() { + if err := d.zipReader.Close(); err != nil { + logrus.Errorf("Unable to close zip file: %q", err) + } + if err := d.fileReader.Close(); err != nil { + logrus.Errorf("Unable to close zip file: %q", err) + } +} diff --git a/pkg/machine/config.go b/pkg/machine/config.go index 650443e82f..25a9aae69b 100644 --- a/pkg/machine/config.go +++ b/pkg/machine/config.go @@ -9,14 +9,14 @@ import ( "net/http" "net/url" "os" - "path/filepath" "strings" "time" + "github.com/containers/common/pkg/strongunits" "github.com/containers/podman/v5/pkg/machine/compression" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/vmconfigs" - "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -57,8 +57,8 @@ type ListResponse struct { Stream string VMType string CPUs uint64 - Memory uint64 - DiskSize uint64 + Memory strongunits.MiB + DiskSize strongunits.GiB Port int RemoteUsername string IdentityPath string @@ -71,8 +71,9 @@ type SSHOptions struct { } type StartOptions struct { - NoInfo bool - Quiet bool + NoInfo bool + Quiet bool + Rosetta bool } type StopOptions struct{} @@ -107,10 +108,9 @@ type DistributionDownload interface { CleanCache() error } type InspectInfo struct { - ConfigPath define.VMFile + ConfigDir define.VMFile ConnectionInfo ConnectionConfig Created time.Time - Image ImageConfig LastUp time.Time Name string Resources vmconfigs.ResourceConfig @@ -118,147 +118,7 @@ type InspectInfo struct { State define.Status UserModeNetworking bool Rootful bool -} - -// GetCacheDir returns the dir where VM images are downloaded into when pulled -func GetCacheDir(vmType define.VMType) (string, error) { - dataDir, err := GetDataDir(vmType) - if err 
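// The zip decompressor above enforces the "single compressed file" rule and
// streams that one entry out of the archive. A stdlib-only sketch of the same
// constraint and copy; the paths are illustrative and this omits the progress
// bar wiring.
package main

import (
	"archive/zip"
	"errors"
	"io"
	"os"
)

func extractSingleEntry(src, dst string) error {
	zr, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer zr.Close()

	if len(zr.File) != 1 {
		return errors.New("machine image zip must contain exactly one file")
	}
	in, err := zr.File[0].Open()
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, in)
	return err
}

func main() {
	_ = extractSingleEntry("./testdata/sample.zip", "/tmp/hellozip")
}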
!= nil { - return "", err - } - cacheDir := filepath.Join(dataDir, "cache") - if _, err := os.Stat(cacheDir); !errors.Is(err, os.ErrNotExist) { - return cacheDir, nil - } - return cacheDir, os.MkdirAll(cacheDir, 0755) -} - -// GetDataDir returns the filepath where vm images should -// live for podman-machine. -func GetDataDir(vmType define.VMType) (string, error) { - dataDirPrefix, err := DataDirPrefix() - if err != nil { - return "", err - } - dataDir := filepath.Join(dataDirPrefix, vmType.String()) - if _, err := os.Stat(dataDir); !errors.Is(err, os.ErrNotExist) { - return dataDir, nil - } - mkdirErr := os.MkdirAll(dataDir, 0755) - return dataDir, mkdirErr -} - -// GetGlobalDataDir returns the root of all backends -// for shared machine data. -func GetGlobalDataDir() (string, error) { - dataDir, err := DataDirPrefix() - if err != nil { - return "", err - } - - return dataDir, os.MkdirAll(dataDir, 0755) -} - -func GetMachineDirs(vmType define.VMType) (*define.MachineDirs, error) { - rtDir, err := getRuntimeDir() - if err != nil { - return nil, err - } - - rtDir = filepath.Join(rtDir, "podman") - configDir, err := GetConfDir(vmType) - if err != nil { - return nil, err - } - - configDirFile, err := define.NewMachineFile(configDir, nil) - if err != nil { - return nil, err - } - dataDir, err := GetDataDir(vmType) - if err != nil { - return nil, err - } - - dataDirFile, err := define.NewMachineFile(dataDir, nil) - if err != nil { - return nil, err - } - - imageCacheDir, err := dataDirFile.AppendToNewVMFile("cache", nil) - if err != nil { - return nil, err - } - - rtDirFile, err := define.NewMachineFile(rtDir, nil) - if err != nil { - return nil, err - } - - dirs := define.MachineDirs{ - ConfigDir: configDirFile, - DataDir: dataDirFile, - ImageCacheDir: imageCacheDir, - RuntimeDir: rtDirFile, - } - - // make sure all machine dirs are present - if err := os.MkdirAll(rtDir, 0755); err != nil { - return nil, err - } - if err := os.MkdirAll(configDir, 0755); err != nil { - return nil, err - } - - // Because this is a mkdirall, we make the image cache dir - // which is a subdir of datadir (so the datadir is made anyway) - err = os.MkdirAll(imageCacheDir.GetPath(), 0755) - - return &dirs, err -} - -// DataDirPrefix returns the path prefix for all machine data files -func DataDirPrefix() (string, error) { - data, err := homedir.GetDataHome() - if err != nil { - return "", err - } - dataDir := filepath.Join(data, "containers", "podman", "machine") - return dataDir, nil -} - -// GetConfigDir returns the filepath to where configuration -// files for podman-machine should live -func GetConfDir(vmType define.VMType) (string, error) { - confDirPrefix, err := ConfDirPrefix() - if err != nil { - return "", err - } - confDir := filepath.Join(confDirPrefix, vmType.String()) - if _, err := os.Stat(confDir); !errors.Is(err, os.ErrNotExist) { - return confDir, nil - } - mkdirErr := os.MkdirAll(confDir, 0755) - return confDir, mkdirErr -} - -// ConfDirPrefix returns the path prefix for all machine config files -func ConfDirPrefix() (string, error) { - conf, err := homedir.GetConfigHome() - if err != nil { - return "", err - } - confDir := filepath.Join(conf, "containers", "podman", "machine") - return confDir, nil -} - -// GetSSHIdentityPath returns the path to the expected SSH private key -func GetSSHIdentityPath(name string) (string, error) { - datadir, err := GetGlobalDataDir() - if err != nil { - return "", err - } - return filepath.Join(datadir, name), nil + Rosetta bool } // ImageConfig describes the bootable 
image for the VM @@ -315,12 +175,12 @@ func (p *Virtualization) VMType() define.VMType { } func (p *Virtualization) NewDownload(vmName string) (Download, error) { - cacheDir, err := GetCacheDir(p.VMType()) + cacheDir, err := env.GetCacheDir(p.VMType()) if err != nil { return Download{}, err } - dataDir, err := GetDataDir(p.VMType()) + dataDir, err := env.GetDataDir(p.VMType()) if err != nil { return Download{}, err } @@ -409,7 +269,7 @@ func WaitAndPingAPI(sock string) { if err == nil { defer resp.Body.Close() } - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { logrus.Warn("API socket failed ping test") } } diff --git a/pkg/machine/config_test.go b/pkg/machine/config_test.go index 3450a111f1..f5cb14c314 100644 --- a/pkg/machine/config_test.go +++ b/pkg/machine/config_test.go @@ -6,14 +6,15 @@ import ( "path/filepath" "testing" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/stretchr/testify/assert" ) func TestGetSSHIdentityPath(t *testing.T) { name := "p-test" - datadir, err := GetGlobalDataDir() + datadir, err := env.GetGlobalDataDir() assert.Nil(t, err) - identityPath, err := GetSSHIdentityPath(name) + identityPath, err := env.GetSSHIdentityPath(name) assert.Nil(t, err) assert.Equal(t, identityPath, filepath.Join(datadir, name)) } diff --git a/pkg/machine/connection/add.go b/pkg/machine/connection/add.go index c87550065f..930c63f6ce 100644 --- a/pkg/machine/connection/add.go +++ b/pkg/machine/connection/add.go @@ -1,3 +1,5 @@ +//go:build amd64 || arm64 + package connection import ( @@ -14,10 +16,23 @@ func AddSSHConnectionsToPodmanSocket(uid, port int, identityPath, name, remoteUs fmt.Println("An ignition path was provided. No SSH connection was added to Podman") return nil } + + cons := createConnections(name, uid, port, remoteUsername) + + // The first connection defined when connections is empty will become the default + // regardless of IsDefault, so order according to rootful + if opts.Rootful { + cons[0], cons[1] = cons[1], cons[0] + } + + return addConnection(cons, identityPath, opts.IsDefault) +} + +func createConnections(name string, uid, port int, remoteUsername string) []connection { uri := makeSSHURL(LocalhostIP, fmt.Sprintf("/run/user/%d/podman/podman.sock", uid), strconv.Itoa(port), remoteUsername) uriRoot := makeSSHURL(LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(port), "root") - cons := []connection{ + return []connection{ { name: name, uri: uri, @@ -27,12 +42,4 @@ func AddSSHConnectionsToPodmanSocket(uid, port int, identityPath, name, remoteUs uri: uriRoot, }, } - - // The first connection defined when connections is empty will become the default - // regardless of IsDefault, so order according to rootful - if opts.Rootful { - cons[0], cons[1] = cons[1], cons[0] - } - - return addConnection(cons, identityPath, opts.IsDefault) } diff --git a/pkg/machine/connection/connection.go b/pkg/machine/connection/connection.go index 4c3394a8d2..469dadcb1e 100644 --- a/pkg/machine/connection/connection.go +++ b/pkg/machine/connection/connection.go @@ -55,14 +55,17 @@ func addConnection(cons []connection, identity string, isDefault bool) error { }) } -func ChangeConnectionURI(name string, uri fmt.Stringer) error { +func UpdateConnectionPairPort(name string, port, uid int, remoteUsername string, identityPath string) error { + cons := createConnections(name, uid, port, remoteUsername) return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error { - dst, ok := cfg.Connection.Connections[name] - if !ok { 
- return errors.New("connection not found") + for _, con := range cons { + dst := config.Destination{ + IsMachine: true, + URI: con.uri.String(), + Identity: identityPath, + } + cfg.Connection.Connections[con.name] = dst } - dst.URI = uri.String() - cfg.Connection.Connections[name] = dst return nil }) diff --git a/pkg/machine/define/config.go b/pkg/machine/define/config.go index 245080122c..90135f76fd 100644 --- a/pkg/machine/define/config.go +++ b/pkg/machine/define/config.go @@ -5,6 +5,10 @@ import "os" const UserCertsTargetPath = "/etc/containers/certs.d" const DefaultIdentityName = "machine" +// MountTag is an identifier to mount a VirtioFS file system tag on a mount point in the VM. +// Ref: https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta +const MountTag = "rosetta" + var ( DefaultFilePerm os.FileMode = 0644 ) diff --git a/pkg/machine/define/image_format.go b/pkg/machine/define/image_format.go index 9b2766d638..6adbe75c75 100644 --- a/pkg/machine/define/image_format.go +++ b/pkg/machine/define/image_format.go @@ -1,5 +1,7 @@ package define +import "fmt" + type ImageFormat int64 const ( @@ -22,13 +24,9 @@ func (imf ImageFormat) Kind() string { } func (imf ImageFormat) KindWithCompression() string { - switch imf { - case Vhdx: - return "vhdx.zip" - case Tar: + // Tar uses xz; all others use zstd + if imf == Tar { return "tar.xz" - case Raw: - return "raw.gz" } - return "qcow2.xz" + return fmt.Sprintf("%s.zst", imf.Kind()) } diff --git a/pkg/machine/define/image_format_test.go b/pkg/machine/define/image_format_test.go index 50ca6b55d2..8c07a6bbd7 100644 --- a/pkg/machine/define/image_format_test.go +++ b/pkg/machine/define/image_format_test.go @@ -45,19 +45,19 @@ func TestImageFormat_KindWithCompression(t *testing.T) { want string }{ { - name: "vhdx.zip", + name: "vhdx", imf: Vhdx, - want: "vhdx.zip", + want: "vhdx.zst", }, { name: "qcow2", imf: Qcow, - want: "qcow2.xz", + want: "qcow2.zst", }, { - name: "raw.gz", + name: "raw", imf: Raw, - want: "raw.gz", + want: "raw.zst", }, { name: "tar.xz", imf: Tar, diff --git a/pkg/machine/define/initopts.go b/pkg/machine/define/initopts.go index 06bbef8520..fcf922958b 100644 --- a/pkg/machine/define/initopts.go +++ b/pkg/machine/define/initopts.go @@ -6,7 +6,7 @@ type InitOptions struct { CPUS uint64 DiskSize uint64 IgnitionPath string - ImagePath string + Image string Volumes []string VolumeDriver string IsDefault bool diff --git a/pkg/machine/define/setopts.go b/pkg/machine/define/setopts.go index 4f6ba24489..6b00478ec0 100644 --- a/pkg/machine/define/setopts.go +++ b/pkg/machine/define/setopts.go @@ -5,7 +5,7 @@ import "github.com/containers/common/pkg/strongunits" type SetOptions struct { CPUs *uint64 DiskSize *strongunits.GiB - Memory *uint64 + Memory *strongunits.MiB Rootful *bool UserModeNetworking *bool USBs *[]string diff --git a/pkg/machine/define/vmfile.go b/pkg/machine/define/vmfile.go index 1795a4dc5a..261cc78226 100644 --- a/pkg/machine/define/vmfile.go +++ b/pkg/machine/define/vmfile.go @@ -2,6 +2,7 @@ package define import ( "errors" + "io" "os" "path/filepath" "strconv" @@ -48,6 +49,22 @@ func (m *VMFile) Read() ([]byte, error) { return os.ReadFile(m.GetPath()) } +// Read the first n bytes of a given file and return in []bytes +func (m *VMFile) ReadMagicNumber(n int) ([]byte, error) { + f, err := os.Open(m.GetPath()) + if err != nil { + return nil, err + } + defer f.Close() + b := make([]byte, n) + n, err = io.ReadFull(f, b) + if err != nil && err != io.ErrUnexpectedEOF && 
err != io.EOF { + return b[:n], err + } else { + return b[:n], nil + } +} + // ReadPIDFrom a file and return as int. -1 means the pid file could not // be read or had something that could not be converted to an int in it func (m *VMFile) ReadPIDFrom() (int, error) { diff --git a/pkg/machine/define/vmtype.go b/pkg/machine/define/vmtype.go index 6ae701bacb..e96748cb96 100644 --- a/pkg/machine/define/vmtype.go +++ b/pkg/machine/define/vmtype.go @@ -12,6 +12,7 @@ const ( WSLVirt AppleHvVirt HyperVVirt + LibKrun UnknownVirt ) @@ -22,6 +23,7 @@ const ( qemu = "qemu" appleHV = "applehv" hyperV = "hyperv" + libkrun = "libkrun" ) func (v VMType) String() string { @@ -32,6 +34,23 @@ func (v VMType) String() string { return appleHV case HyperVVirt: return hyperV + case LibKrun: + return libkrun + } + return qemu +} + +// DiskType returns a string representation that matches the OCI artifact +// type on the container image registry +func (v VMType) DiskType() string { + switch v { + case WSLVirt: + return wsl + // Both AppleHV and Libkrun use same raw disk flavor + case AppleHvVirt, LibKrun: + return appleHV + case HyperVVirt: + return hyperV } return qemu } @@ -44,6 +63,8 @@ func (v VMType) ImageFormat() ImageFormat { return Raw case HyperVVirt: return Vhdx + case LibKrun: + return Raw } return Qcow } @@ -56,6 +77,8 @@ func ParseVMType(input string, emptyFallback VMType) (VMType, error) { return WSLVirt, nil case appleHV: return AppleHvVirt, nil + case libkrun: + return LibKrun, nil case hyperV: return HyperVVirt, nil case "": diff --git a/pkg/machine/e2e/README.md b/pkg/machine/e2e/README.md index 4b737b686b..48e12612f0 100644 --- a/pkg/machine/e2e/README.md +++ b/pkg/machine/e2e/README.md @@ -9,7 +9,7 @@ Note: you must not have any machines defined before running tests ## Microsoft Windows -### HyperV +### Hyper-V 1. Open a powershell as admin 1. $env:CONTAINERS_MACHINE_PROVIDER="hyperv" @@ -28,9 +28,11 @@ Note: To run specific test files, add the test files to the end of the winmake c `./winmake localmachine "basic_test.go start_test.go"` -## MacOS +## macOS ### Apple Hypervisor 1. `make podman-remote` -1. `make localmachine` (Add `FOCUS_FILE=basic_test.go` to only run basic test) +1. `make localmachine` (Add `FOCUS_FILE=basic_test.go` to only run basic test. Or add `FOCUS="simple init with start"` to only run one test case) + +Note: On macOS, an error will occur if the path length of `$TMPDIR` is longer than 22 characters. Please set the appropriate path to `$TMPDIR`. Also, if `$TMPDIR` is empty, `/private/tmp` will be set. diff --git a/pkg/machine/e2e/basic_test.go b/pkg/machine/e2e/basic_test.go index ab5c5c6312..79d97eaa12 100644 --- a/pkg/machine/e2e/basic_test.go +++ b/pkg/machine/e2e/basic_test.go @@ -1,36 +1,30 @@ package e2e_test import ( + "fmt" "io" "net" "net/http" "net/url" + "os" + "path" + "path/filepath" "time" + "github.com/containers/podman/v5/pkg/machine/define" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" ) var _ = Describe("run basic podman commands", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("Basic ops", func() { // golangci-lint has trouble with actually skipping tests marked Skip // so skip it on cirrus envs and where CIRRUS_CI isn't set. 
name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -50,15 +44,83 @@ var _ = Describe("run basic podman commands", func() { Expect(runAlp).To(Exit(0)) Expect(runAlp.outputToString()).To(ContainSubstring("Alpine Linux")) + contextDir := GinkgoT().TempDir() + cfile := filepath.Join(contextDir, "Containerfile") + err = os.WriteFile(cfile, []byte("FROM quay.io/libpod/alpine_nginx\nRUN ip addr\n"), 0o644) + Expect(err).ToNot(HaveOccurred()) + + build, err := mb.setCmd(bm.withPodmanCommand([]string{"build", contextDir})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(build).To(Exit(0)) + Expect(build.outputToString()).To(ContainSubstring("COMMIT")) + rmCon, err := mb.setCmd(bm.withPodmanCommand([]string{"rm", "-a"})).run() Expect(err).ToNot(HaveOccurred()) Expect(rmCon).To(Exit(0)) }) + It("Volume ops", func() { + skipIfVmtype(define.HyperVVirt, "FIXME: #21036 - Hyper-V podman run -v fails due to path translation issues") + + tDir, err := filepath.Abs(GinkgoT().TempDir()) + Expect(err).ToNot(HaveOccurred()) + roFile := filepath.Join(tDir, "attr-test-file") + + // Create the file as ready-only, since some platforms disallow selinux attr writes + // The subsequent Z mount should still succeed in spite of that + rf, err := os.OpenFile(roFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o444) + Expect(err).ToNot(HaveOccurred()) + rf.Close() + + name := randomString() + i := new(initMachine).withImage(mb.imagePath).withNow() + + // All other platforms have an implicit mount for the temp area + if isVmtype(define.QemuVirt) { + i.withVolume(tDir) + } + session, err := mb.setName(name).setCmd(i).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + bm := basicMachine{} + // Test relabel works on all platforms + runAlp, err := mb.setCmd(bm.withPodmanCommand([]string{"run", "-v", tDir + ":/test:Z", "quay.io/libpod/alpine_nginx", "ls", "/test/attr-test-file"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(runAlp).To(Exit(0)) + }) + + It("Volume should be virtiofs", func() { + // In theory this could run on MacOS too, but we know virtiofs works for that now, + // this is just testing linux + skipIfNotVmtype(define.QemuVirt, "This is just adding coverage for virtiofs on linux") + + tDir, err := filepath.Abs(GinkgoT().TempDir()) + Expect(err).ToNot(HaveOccurred()) + + err = os.WriteFile(filepath.Join(tDir, "testfile"), []byte("some test contents"), 0o644) + Expect(err).ToNot(HaveOccurred()) + + name := randomString() + i := new(initMachine).withImage(mb.imagePath).withNow() + + // Ensure that this is a volume, it may not be automatically on qemu + i.withVolume(tDir) + session, err := mb.setName(name).setCmd(i).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + ssh := new(sshMachine).withSSHCommand([]string{"findmnt", "-no", "FSTYPE", tDir}) + findmnt, err := mb.setName(name).setCmd(ssh).run() + Expect(err).ToNot(HaveOccurred()) + Expect(findmnt).To(Exit(0)) + Expect(findmnt.outputToString()).To(ContainSubstring("virtiofs")) + }) + It("Podman ops with port forwarding and gvproxy", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() 
Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -93,6 +155,31 @@ var _ = Describe("run basic podman commands", func() { Expect(out).ToNot(ContainSubstring("gvproxy")) }) + It("podman volume on non-standard path", func() { + skipIfWSL("Requires standard volume handling") + dir, err := os.MkdirTemp("", "machine-volume") + Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(dir) + + testString := "abcdefg1234567" + testFile := "testfile" + err = os.WriteFile(filepath.Join(dir, testFile), []byte(testString), 0644) + Expect(err).ToNot(HaveOccurred()) + + name := randomString() + machinePath := "/does/not/exist" + init := new(initMachine).withVolume(fmt.Sprintf("%s:%s", dir, machinePath)).withImage(mb.imagePath).withNow() + session, err := mb.setName(name).setCmd(init).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + // Must use path.Join to ensure forward slashes are used, even on Windows. + ssh := new(sshMachine).withSSHCommand([]string{"cat", path.Join(machinePath, testFile)}) + ls, err := mb.setName(name).setCmd(ssh).run() + Expect(err).ToNot(HaveOccurred()) + Expect(ls).To(Exit(0)) + Expect(ls.outputToString()).To(ContainSubstring(testString)) + }) }) func testHTTPServer(port string, shouldErr bool, expectedResponse string) { diff --git a/pkg/machine/e2e/config_init_test.go b/pkg/machine/e2e/config_init_test.go index bb6833fd2d..9d33e2d3d2 100644 --- a/pkg/machine/e2e/config_init_test.go +++ b/pkg/machine/e2e/config_init_test.go @@ -2,6 +2,11 @@ package e2e_test import ( "strconv" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gexec" ) type initMachine struct { @@ -23,7 +28,7 @@ type initMachine struct { diskSize *uint ignitionPath string username string - imagePath string + image string memory *uint now bool timezone string @@ -50,8 +55,8 @@ func (i *initMachine) buildCmd(m *machineTestBuilder) []string { if l := len(i.username); l > 0 { cmd = append(cmd, "--username", i.username) } - if l := len(i.imagePath); l > 0 { - cmd = append(cmd, "--image-path", i.imagePath) + if l := len(i.image); l > 0 { + cmd = append(cmd, "--image", i.image) } if i.memory != nil { cmd = append(cmd, "--memory", strconv.Itoa(int(*i.memory))) @@ -71,7 +76,24 @@ func (i *initMachine) buildCmd(m *machineTestBuilder) []string { if i.userModeNetworking { cmd = append(cmd, "--user-mode-networking") } - cmd = append(cmd, m.name) + name := m.name + cmd = append(cmd, name) + + // when we create a new VM remove it again as cleanup + DeferCleanup(func() { + r := new(rmMachine) + session, err := m.setName(name).setCmd(r.withForce()).run() + Expect(err).ToNot(HaveOccurred(), "error occurred rm'ing machine") + // Some test create a invalid VM so the VM does not exists in this case we have to ignore the error. + // It would be much better if rm -f would behave like other commands and ignore not exists errors. 
+ if session.ExitCode() == 125 { + if strings.Contains(session.errorToString(), "VM does not exist") { + return + } + } + Expect(session).To(Exit(0)) + }) + i.cmd = cmd return cmd } @@ -95,8 +117,8 @@ func (i *initMachine) withUsername(username string) *initMachine { return i } -func (i *initMachine) withImagePath(path string) *initMachine { - i.imagePath = path +func (i *initMachine) withImage(path string) *initMachine { + i.image = path return i } diff --git a/pkg/machine/e2e/config_reset_test.go b/pkg/machine/e2e/config_reset_test.go index 2bba5b92b4..23974a109a 100644 --- a/pkg/machine/e2e/config_reset_test.go +++ b/pkg/machine/e2e/config_reset_test.go @@ -2,7 +2,7 @@ package e2e_test type resetMachine struct { /* - -f, --force Stop and do not prompt before reseting + -f, --force Stop and do not prompt before resetting */ force bool diff --git a/pkg/machine/e2e/config_system_connection_list_test.go b/pkg/machine/e2e/config_system_connection_list_test.go new file mode 100644 index 0000000000..cc52745e2d --- /dev/null +++ b/pkg/machine/e2e/config_system_connection_list_test.go @@ -0,0 +1,23 @@ +package e2e_test + +type listSystemConnection struct { + /* + --format string Custom Go template for printing connections + */ + + format string +} + +func (l listSystemConnection) buildCmd(m *machineTestBuilder) []string { + cmd := []string{"system", "connection", "list"} + if len(l.format) > 0 { + cmd = append(cmd, "--format", l.format) + } + + return cmd +} + +func (l *listSystemConnection) withFormat(format string) *listSystemConnection { + l.format = format + return l +} diff --git a/pkg/machine/e2e/config_test.go b/pkg/machine/e2e/config_test.go index 0e4e0b4e96..ddfeb16662 100644 --- a/pkg/machine/e2e/config_test.go +++ b/pkg/machine/e2e/config_test.go @@ -6,6 +6,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "strconv" "strings" "time" @@ -18,7 +19,6 @@ import ( "github.com/onsi/gomega/format" . "github.com/onsi/gomega/gexec" "github.com/onsi/gomega/types" - "golang.org/x/exp/slices" ) var originalHomeDir = os.Getenv("HOME") @@ -236,7 +236,9 @@ func isWSL() bool { return isVmtype(define.WSLVirt) } -//nolint:unused +// Only used on Windows +// +//nolint:unparam,unused func runSystemCommand(binary string, cmdArgs []string, timeout time.Duration, wait bool) (*machineSession, error) { GinkgoWriter.Println(binary + " " + strings.Join(cmdArgs, " ")) c := exec.Command(binary, cmdArgs...) 
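The patch above adds a listSystemConnection helper (config_system_connection_list_test.go) but no test in this section exercises it yet. The following is an illustrative usage sketch only, not a test taken from the patch: it shows how the helper would slot into the existing machineTestBuilder/Ginkgo pattern inside one of the Describe blocks, reusing randomString, initMachine, withImage, setCmd, and the Gomega matchers already used throughout these e2e tests. The "{{.Name}}" template field is the standard one for `podman system connection list`.

// Hypothetical example; would live inside an existing Describe block.
It("system connection list shows the machine", func() {
	name := randomString()
	i := new(initMachine)
	session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run()
	Expect(err).ToNot(HaveOccurred())
	Expect(session).To(Exit(0))

	// List connection names via the new helper and check the machine's
	// rootless connection is present.
	l := new(listSystemConnection)
	listSession, err := mb.setCmd(l.withFormat("{{.Name}}")).run()
	Expect(err).ToNot(HaveOccurred())
	Expect(listSession).To(Exit(0))
	Expect(listSession.outputToString()).To(ContainSubstring(name))
})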
diff --git a/pkg/machine/e2e/info_test.go b/pkg/machine/e2e/info_test.go index 19bcfd7246..7f3af2516e 100644 --- a/pkg/machine/e2e/info_test.go +++ b/pkg/machine/e2e/info_test.go @@ -11,17 +11,6 @@ import ( ) var _ = Describe("podman machine info", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("machine info", func() { info := new(infoMachine) @@ -39,7 +28,7 @@ var _ = Describe("podman machine info", func() { // Create a machine and check if info has been updated i := new(initMachine) - initSession, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + initSession, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(initSession).To(Exit(0)) diff --git a/pkg/machine/e2e/init_test.go b/pkg/machine/e2e/init_test.go index 83f0b5c9a9..63c0336e97 100644 --- a/pkg/machine/e2e/init_test.go +++ b/pkg/machine/e2e/init_test.go @@ -9,33 +9,23 @@ import ( "strings" "time" + "github.com/containers/common/pkg/strongunits" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" + "github.com/shirou/gopsutil/v3/mem" "github.com/sirupsen/logrus" ) var _ = Describe("podman machine init", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) - cpus := runtime.NumCPU() / 2 if cpus == 0 { cpus = 1 } - It("bad init name", func() { + It("bad init", func() { i := initMachine{} reallyLongName := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" session, err := mb.setName(reallyLongName).setCmd(&i).run() @@ -61,7 +51,7 @@ var _ = Describe("podman machine init", func() { bi := new(initMachine) want := fmt.Sprintf("system connection \"%s\" already exists", badName) - badInit, berr := mb.setName(badName).setCmd(bi.withImagePath(mb.imagePath)).run() + badInit, berr := mb.setName(badName).setCmd(bi.withImage(mb.imagePath)).run() Expect(berr).ToNot(HaveOccurred()) Expect(badInit).To(Exit(125)) Expect(badInit.errorToString()).To(ContainSubstring(want)) @@ -77,11 +67,21 @@ var _ = Describe("podman machine init", func() { Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(125)) Expect(session.errorToString()).To(ContainSubstring(`invalid username "-/a": names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: invalid argument`)) + + // this comes in bytes + memStat, err := mem.VirtualMemory() + Expect(err).ToNot(HaveOccurred()) + total := strongunits.ToMib(strongunits.B(memStat.Total)) + 1024 + + badMem := initMachine{} + badMemSession, err := mb.setCmd(badMem.withMemory(uint(total))).run() + Expect(err).ToNot(HaveOccurred()) + Expect(badMemSession).To(Exit(125)) }) It("simple init", func() { i := new(initMachine) - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -94,13 +94,13 @@ var _ = Describe("podman machine init", func() { Expect(testMachine.Name).To(Equal(mb.names[0])) if testProvider.VMType() != define.WSLVirt { // WSL hardware specs are hardcoded Expect(testMachine.Resources.CPUs).To(Equal(uint64(cpus))) - Expect(testMachine.Resources.Memory).To(Equal(uint64(2048))) + Expect(testMachine.Resources.Memory).To(BeEquivalentTo(uint64(2048))) } }) 
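The "bad init" test above asks for more memory than the host has: it converts gopsutil's byte count to MiB with strongunits and adds 1024 MiB (1 GiB), expecting `podman machine init` to fail with exit 125. A minimal standalone sketch of that conversion follows; the 16 GiB host figure is an assumption standing in for the live mem.VirtualMemory() call, and the program itself is not part of the patch.

package main

import (
	"fmt"

	"github.com/containers/common/pkg/strongunits"
)

func main() {
	// Assume a 16 GiB host instead of querying mem.VirtualMemory().
	hostBytes := strongunits.B(16 * 1024 * 1024 * 1024)
	// Same arithmetic as the test: bytes -> MiB, then request 1 GiB more
	// than the host can provide so the init is expected to be rejected.
	request := strongunits.ToMib(hostBytes) + 1024
	fmt.Printf("--memory %d\n", uint64(request)) // prints: --memory 17408
}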
It("simple init with start", func() { i := initMachine{} - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -137,7 +137,7 @@ var _ = Describe("podman machine init", func() { It("simple init with username", func() { i := new(initMachine) remoteUsername := "remoteuser" - session, err := mb.setCmd(i.withImagePath(mb.imagePath).withUsername(remoteUsername)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath).withUsername(remoteUsername)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -150,7 +150,7 @@ var _ = Describe("podman machine init", func() { Expect(testMachine.Name).To(Equal(mb.names[0])) if testProvider.VMType() != define.WSLVirt { // memory and cpus something we cannot set with WSL Expect(testMachine.Resources.CPUs).To(Equal(uint64(cpus))) - Expect(testMachine.Resources.Memory).To(Equal(uint64(2048))) + Expect(testMachine.Resources.Memory).To(BeEquivalentTo(uint64(2048))) } Expect(testMachine.SSHConfig.RemoteUsername).To(Equal(remoteUsername)) @@ -160,7 +160,7 @@ var _ = Describe("podman machine init", func() { skipIfWSL("setting hardware resource numbers and timezone are not supported on WSL") name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withCPUs(2).withDiskSize(102).withMemory(4096).withTimezone("Pacific/Honolulu")).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withCPUs(2).withDiskSize(102).withMemory(4096).withTimezone("Pacific/Honolulu")).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -207,17 +207,18 @@ var _ = Describe("podman machine init", func() { Expect(err).ToNot(HaveOccurred()) _, err = os.CreateTemp(tmpDir, "example") Expect(err).ToNot(HaveOccurred()) - mount := tmpDir + ":/testmountdir" + // Test long target path, see https://github.com/containers/podman/issues/22226 + mount := tmpDir + ":/very-long-test-mount-dir-path-more-than-thirty-six-bytes" defer func() { _ = utils.GuardedRemoveAll(tmpDir) }() name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withVolume(mount).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withVolume(mount).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) ssh := sshMachine{} - sshSession, err := mb.setName(name).setCmd(ssh.withSSHCommand([]string{"ls /testmountdir"})).run() + sshSession, err := mb.setName(name).setCmd(ssh.withSSHCommand([]string{"ls /very-long-test-mount-dir-path-more-than-thirty-six-bytes"})).run() Expect(err).ToNot(HaveOccurred()) Expect(sshSession).To(Exit(0)) Expect(sshSession.outputToString()).To(ContainSubstring("example")) @@ -226,7 +227,7 @@ var _ = Describe("podman machine init", func() { It("machine init rootless docker.sock check", func() { i := initMachine{} name := randomString() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -249,7 +250,7 @@ var _ = Describe("podman machine init", func() { It("machine init rootful with docker.sock check", func() { i := initMachine{} name := randomString() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withRootful(true)).run() + session, err := 
mb.setName(name).setCmd(i.withImage(mb.imagePath).withRootful(true)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -276,27 +277,17 @@ var _ = Describe("podman machine init", func() { It("init should cleanup on failure", func() { i := new(initMachine) name := randomString() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) inspect := new(inspectMachine) - inspect = inspect.withFormat("{{.ConfigPath.Path}}") + inspect = inspect.withFormat("{{.ConfigDir.Path}}") inspectSession, err := mb.setCmd(inspect).run() Expect(err).ToNot(HaveOccurred()) cfgpth := filepath.Join(inspectSession.outputToString(), fmt.Sprintf("%s.json", name)) - inspect = inspect.withFormat("{{.Image.IgnitionFile.Path}}") - inspectSession, err = mb.setCmd(inspect).run() - Expect(err).ToNot(HaveOccurred()) - ign := inspectSession.outputToString() - - inspect = inspect.withFormat("{{.Image.ImagePath.Path}}") - inspectSession, err = mb.setCmd(inspect).run() - Expect(err).ToNot(HaveOccurred()) - img := inspectSession.outputToString() - rm := rmMachine{} removeSession, err := mb.setCmd(rm.withForce()).run() Expect(err).ToNot(HaveOccurred()) @@ -313,17 +304,21 @@ var _ = Describe("podman machine init", func() { // Bad ignition path - init fails i = new(initMachine) i.ignitionPath = "/bad/path" - session, err = mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err = mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(125)) - // ensure files created by init are cleaned up on init failure - _, err = os.Stat(img) + imageSuffix := mb.imagePath[strings.LastIndex(mb.imagePath, "/")+1:] + imgPath := filepath.Join(testDir, ".local", "share", "containers", "podman", "machine", "qemu", mb.name+"_"+imageSuffix) + _, err = os.Stat(imgPath) Expect(err).To(HaveOccurred()) + + cfgDir := filepath.Join(testDir, ".config", "containers", "podman", "machine", testProvider.VMType().String()) _, err = os.Stat(cfgpth) Expect(err).To(HaveOccurred()) - _, err = os.Stat(ign) + ignPath := filepath.Join(cfgDir, mb.name+".ign") + _, err = os.Stat(ignPath) Expect(err).To(HaveOccurred()) } }) @@ -361,7 +356,7 @@ var _ = Describe("podman machine init", func() { // We should be able to init with a bad config present i := new(initMachine) name := randomString() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -375,6 +370,100 @@ var _ = Describe("podman machine init", func() { Expect(err).ToNot(HaveOccurred()) Expect(inspectShouldPass).To(Exit(0)) }) + + It("machine init with rosetta=true", func() { + skipIfVmtype(define.QemuVirt, "Test is only for AppleHv") + skipIfVmtype(define.WSLVirt, "Test is only for AppleHv") + skipIfVmtype(define.HyperVVirt, "Test is only for AppleHv") + skipIfVmtype(define.LibKrun, "Test is only for AppleHv") + if runtime.GOARCH != "arm64" { + Skip("Test is only for AppleHv with arm64 architecture") + } + + i := initMachine{} + name := randomString() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + s := startMachine{} + ssession, err := mb.setCmd(s).setTimeout(time.Minute * 10).run() + 
Expect(err).ToNot(HaveOccurred()) + Expect(ssession).Should(Exit(0)) + + inspect := new(inspectMachine) + inspect = inspect.withFormat("{{.Rosetta}}") + inspectSession, err := mb.setName(name).setCmd(inspect).run() + Expect(err).ToNot(HaveOccurred()) + Expect(inspectSession).To(Exit(0)) + Expect(inspectSession.outputToString()).To(Equal("true")) + + mnt := sshMachine{} + mntSession, err := mb.setName(name).setCmd(mnt.withSSHCommand([]string{"ls -d /mnt/rosetta"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(mntSession).To(Exit(0)) + Expect(mntSession.outputToString()).To(ContainSubstring("/mnt/rosetta")) + + proc := sshMachine{} + procSession, err := mb.setName(name).setCmd(proc.withSSHCommand([]string{"ls -d /proc/sys/fs/binfmt_misc/rosetta"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(procSession).To(Exit(0)) + Expect(procSession.outputToString()).To(ContainSubstring("/proc/sys/fs/binfmt_misc/rosetta")) + + proc2 := sshMachine{} + proc2Session, err := mb.setName(name).setCmd(proc2.withSSHCommand([]string{"ls -d /proc/sys/fs/binfmt_misc/qemu-x86_64"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(proc2Session.ExitCode()).To(Equal(2)) + }) + + It("machine init with rosetta=false", func() { + skipIfVmtype(define.QemuVirt, "Test is only for AppleHv") + skipIfVmtype(define.WSLVirt, "Test is only for AppleHv") + skipIfVmtype(define.HyperVVirt, "Test is only for AppleHv") + skipIfVmtype(define.LibKrun, "Test is only for AppleHv") + if runtime.GOARCH != "arm64" { + Skip("Test is only for AppleHv with arm64 architecture") + } + configDir := filepath.Join(testDir, ".config", "containers") + err := os.MkdirAll(configDir, 0755) + Expect(err).ToNot(HaveOccurred()) + + err = os.WriteFile(filepath.Join(configDir, "containers.conf"), rosettaConfig, 0644) + Expect(err).ToNot(HaveOccurred()) + + i := initMachine{} + name := randomString() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + s := startMachine{} + ssession, err := mb.setCmd(s).setTimeout(time.Minute * 10).run() + Expect(err).ToNot(HaveOccurred()) + Expect(ssession).Should(Exit(0)) + + inspect := new(inspectMachine) + inspect = inspect.withFormat("{{.Rosetta}}") + inspectSession, err := mb.setName(name).setCmd(inspect).run() + Expect(err).ToNot(HaveOccurred()) + Expect(inspectSession).To(Exit(0)) + Expect(inspectSession.outputToString()).To(Equal("false")) + + mnt := sshMachine{} + mntSession, err := mb.setName(name).setCmd(mnt.withSSHCommand([]string{"ls -d /mnt/rosetta"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(mntSession.ExitCode()).To(Equal(2)) + + proc := sshMachine{} + procSession, err := mb.setName(name).setCmd(proc.withSSHCommand([]string{"ls -d /proc/sys/fs/binfmt_misc/rosetta"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(procSession.ExitCode()).To(Equal(2)) + + proc2 := sshMachine{} + proc2Session, err := mb.setName(name).setCmd(proc2.withSSHCommand([]string{"ls -d /proc/sys/fs/binfmt_misc/qemu-x86_64"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(proc2Session.outputToString()).To(ContainSubstring("/proc/sys/fs/binfmt_misc/qemu-x86_64")) + }) }) var p4Config = []byte(`{ @@ -460,3 +549,8 @@ var p4Config = []byte(`{ "LastUp": "0001-01-01T00:00:00Z" } `) + +var rosettaConfig = []byte(` +[machine] +rosetta=false +`) diff --git a/pkg/machine/e2e/init_windows_test.go b/pkg/machine/e2e/init_windows_test.go index 92acd7f8d4..238fe8467f 100644 --- a/pkg/machine/e2e/init_windows_test.go +++ 
b/pkg/machine/e2e/init_windows_test.go @@ -15,17 +15,6 @@ import ( ) var _ = Describe("podman machine init - windows only", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("init with user mode networking", func() { if testProvider.VMType() != define.WSLVirt { @@ -33,17 +22,17 @@ var _ = Describe("podman machine init - windows only", func() { } i := new(initMachine) name := randomString() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withUserModeNetworking(true)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withUserModeNetworking(true)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) defer func() { - _, err := runSystemCommand(wutil.FindWSL(), []string{"--terminate", "podman-net-usermode"}, defaultTimeout, true) + _, err := runSystemCommand(wutil.FindWSL(), []string{"--terminate", "podman-net-usermode"}, defaultTimeout, true) if err != nil { fmt.Println("unable to terminate podman-net-usermode") } - _, err = runSystemCommand(wutil.FindWSL(), []string{"--unregister", "podman-net-usermode"}, defaultTimeout, true) + _, err = runSystemCommand(wutil.FindWSL(), []string{"--unregister", "podman-net-usermode"}, defaultTimeout, true) if err != nil { fmt.Println("unable to unregister podman-net-usermode") } @@ -86,7 +75,8 @@ var _ = Describe("podman machine init - windows only", func() { } }() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(125)) Expect(session.errorToString()).To(ContainSubstring("already exists on hypervisor")) }) @@ -103,7 +93,7 @@ var _ = Describe("podman machine init - windows only", func() { // create a bogus machine i := new(initMachine) - session, err := mb.setName("foobarexport").setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName("foobarexport").setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -128,7 +118,7 @@ var _ = Describe("podman machine init - windows only", func() { }() // Trying to make a vm with the same name as an existing name should result in a 125 - checkSession, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + checkSession, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(checkSession).To(Exit(125)) }) diff --git a/pkg/machine/e2e/inspect_test.go b/pkg/machine/e2e/inspect_test.go index 6aeaf6da43..1c6224d793 100644 --- a/pkg/machine/e2e/inspect_test.go +++ b/pkg/machine/e2e/inspect_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/define" jsoniter "github.com/json-iterator/go" . 
"github.com/onsi/ginkgo/v2" @@ -10,17 +11,6 @@ import ( ) var _ = Describe("podman inspect stop", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("inspect bad name", func() { i := inspectMachine{} @@ -32,12 +22,12 @@ var _ = Describe("podman inspect stop", func() { It("inspect two machines", func() { i := new(initMachine) - foo1, err := mb.setName("foo1").setCmd(i.withImagePath(mb.imagePath)).run() + foo1, err := mb.setName("foo1").setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(foo1).To(Exit(0)) ii := new(initMachine) - foo2, err := mb.setName("foo2").setCmd(ii.withImagePath(mb.imagePath)).run() + foo2, err := mb.setName("foo2").setCmd(ii.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(foo2).To(Exit(0)) @@ -52,7 +42,7 @@ var _ = Describe("podman inspect stop", func() { It("inspect with go format", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -66,13 +56,12 @@ var _ = Describe("podman inspect stop", func() { err = jsoniter.Unmarshal(inspectSession.Bytes(), &inspectInfo) Expect(err).ToNot(HaveOccurred()) - // TODO Re-enable this for tests once inspect is fixed - // switch testProvider.VMType() { - // case define.WSLVirt: - // Expect(inspectInfo[0].ConnectionInfo.PodmanPipe.GetPath()).To(ContainSubstring("podman-")) - // default: - // Expect(inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath()).To(HaveSuffix("podman.sock")) - // } + switch testProvider.VMType() { + case define.HyperVVirt, define.WSLVirt: + Expect(inspectInfo[0].ConnectionInfo.PodmanPipe.GetPath()).To(ContainSubstring("podman-")) + default: + Expect(inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath()).To(HaveSuffix("api.sock")) + } inspect := new(inspectMachine) inspect = inspect.withFormat("{{.Name}}") @@ -89,4 +78,31 @@ var _ = Describe("podman inspect stop", func() { Expect(inspectSession).To(Exit(125)) Expect(inspectSession.errorToString()).To(ContainSubstring("can't evaluate field Abcde in type machine.InspectInfo")) }) + + It("inspect shows a unique socket name per machine", func() { + skipIfVmtype(define.WSLVirt, "test is only relevant for Unix based providers") + skipIfVmtype(define.HyperVVirt, "test is only relevant for Unix based machines") + + var socks []string + for c := 0; c < 2; c++ { + name := randomString() + i := new(initMachine) + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + // regular inspect should + inspectJSON := new(inspectMachine) + inspectSession, err := mb.setName(name).setCmd(inspectJSON).run() + Expect(err).ToNot(HaveOccurred()) + Expect(inspectSession).To(Exit(0)) + + var inspectInfo []machine.InspectInfo + err = jsoniter.Unmarshal(inspectSession.Bytes(), &inspectInfo) + Expect(err).ToNot(HaveOccurred()) + socks = append(socks, inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath()) + } + + Expect(socks[0]).ToNot(Equal(socks[1])) + }) }) diff --git a/pkg/machine/e2e/list_test.go b/pkg/machine/e2e/list_test.go index 54d976fd7f..4b2d738869 100644 --- a/pkg/machine/e2e/list_test.go +++ b/pkg/machine/e2e/list_test.go @@ -1,6 +1,8 @@ package e2e_test import ( + "slices" + "strconv" "strings" 
"time" @@ -9,21 +11,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" - "golang.org/x/exp/slices" ) var _ = Describe("podman machine list", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("list machine", func() { list := new(listMachine) @@ -33,7 +23,7 @@ var _ = Describe("podman machine list", func() { Expect(firstList.outputToStringSlice()).To(HaveLen(1)) // just the header i := new(initMachine) - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -60,11 +50,11 @@ var _ = Describe("podman machine list", func() { Expect(noheaderSession.outputToStringSlice()).To(BeEmpty()) i := new(initMachine) - session, err := mb.setName(name1).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name1).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) - session2, err := mb.setName(name2).setCmd(i.withImagePath(mb.imagePath)).run() + session2, err := mb.setName(name2).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session2).To(Exit(0)) @@ -82,7 +72,7 @@ var _ = Describe("podman machine list", func() { It("list machine: check if running while starting", func() { skipIfWSL("the below logic does not work on WSL. #20978") i := new(initMachine) - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -123,7 +113,7 @@ var _ = Describe("podman machine list", func() { name1 := randomString() i := new(initMachine) - session, err := mb.setName(name1).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name1).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -158,6 +148,40 @@ var _ = Describe("podman machine list", func() { listNames3 := listSession3.outputToStringSlice() Expect(listNames3).To(HaveLen(2)) }) + It("list machine in machine-readable byte format", func() { + i := new(initMachine) + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + list := new(listMachine) + list = list.withFormat(("json")) + listSession, err := mb.setCmd(list).run() + Expect(err).NotTo(HaveOccurred()) + var listResponse []*entities.ListReporter + err = jsoniter.Unmarshal(listSession.Bytes(), &listResponse) + Expect(err).NotTo(HaveOccurred()) + for _, reporter := range listResponse { + memory, err := strconv.Atoi(reporter.Memory) + Expect(err).NotTo(HaveOccurred()) + Expect(memory).To(BeNumerically(">", 2000000000)) // 2GiB + diskSize, err := strconv.Atoi(reporter.DiskSize) + Expect(err).NotTo(HaveOccurred()) + Expect(diskSize).To(BeNumerically(">", 11000000000)) // 11GiB + } + }) + It("list machine in human-readable format", func() { + i := new(initMachine) + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + list := new(listMachine) + listSession, err := mb.setCmd(list.withFormat("{{.Memory}} {{.DiskSize}}")).run() + Expect(err).NotTo(HaveOccurred()) + Expect(listSession).To(Exit(0)) + Expect(listSession.outputToString()).To(Equal("2GiB 11GiB")) + 
}) }) func stripAsterisk(sl []string) { diff --git a/pkg/machine/e2e/machine_pull_test.go b/pkg/machine/e2e/machine_pull_test.go new file mode 100644 index 0000000000..ca81ecf7ff --- /dev/null +++ b/pkg/machine/e2e/machine_pull_test.go @@ -0,0 +1,43 @@ +package e2e_test + +import ( + "context" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/podman/v5/pkg/machine/compression" + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/ocipull" +) + +func pullOCITestDisk(finalDir string, vmType define.VMType) error { + imageCacheDir, err := define.NewMachineFile(finalDir, nil) + if err != nil { + return err + } + unusedFinalPath, err := imageCacheDir.AppendToNewVMFile(fmt.Sprintf("machinetest-%s", runtime.GOOS), nil) + if err != nil { + return err + } + dirs := define.MachineDirs{ImageCacheDir: imageCacheDir} + ociArtPull, err := ocipull.NewOCIArtifactPull(context.Background(), &dirs, "", "e2emachine", vmType, unusedFinalPath) + if err != nil { + return err + } + _, err = ociArtPull.GetNoCompress() + if err != nil { + return err + } + fp, originalName := ociArtPull.OriginalFileName() + // Rename the download to something we recognize + compressionExt := filepath.Ext(fp) + fqImageName = filepath.Join(tmpDir, strings.TrimSuffix(originalName, compressionExt)) + suiteImageName = filepath.Base(fqImageName) + compressedImage, err := define.NewMachineFile(fp, nil) + if err != nil { + return err + } + return compression.Decompress(compressedImage, fqImageName) +} diff --git a/pkg/machine/e2e/machine_test.go b/pkg/machine/e2e/machine_test.go index 249f0250e0..3f7415d870 100644 --- a/pkg/machine/e2e/machine_test.go +++ b/pkg/machine/e2e/machine_test.go @@ -1,26 +1,19 @@ package e2e_test import ( + "errors" "fmt" - "io" - url2 "net/url" "os" - "path" "path/filepath" "runtime" "strings" "testing" - "time" - "github.com/containers/podman/v5/pkg/machine/wsl" - - "github.com/containers/podman/v5/pkg/machine" - "github.com/containers/podman/v5/pkg/machine/compression" + "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/pkg/machine/provider" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/utils" - crcOs "github.com/crc-org/crc/v2/pkg/os" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -41,7 +34,11 @@ var ( func init() { if value, ok := os.LookupEnv("TMPDIR"); ok { - tmpDir = value + var err error + tmpDir, err = setTmpDir(value) + if err != nil { + fmt.Printf("failed to set TMPDIR: %q\n", err) + } } } @@ -54,79 +51,42 @@ func TestMachine(t *testing.T) { var testProvider vmconfigs.VMProvider var _ = BeforeSuite(func() { - var err error + var ( + err error + pullError error + ) testProvider, err = provider.Get() if err != nil { Fail("unable to create testProvider") } - - downloadLocation := os.Getenv("MACHINE_IMAGE") - if downloadLocation == "" { - // TODO so beautifully gross ... 
ideally we can spend some time - // here making life easier on the next person - switch testProvider.VMType() { - case define.WSLVirt: - dl, _, _, _, err := wsl.GetFedoraDownloadForWSL() - if err != nil { - Fail("unable to determine WSL download") - } - downloadLocation = dl.String() - default: - downloadLocation, err = GetDownload(testProvider.VMType()) - if err != nil { - Fail(fmt.Sprintf("unable to derive download disk from fedora coreos: %q", err)) - } + if testProvider.VMType() == define.WSLVirt { + pullError = pullWSLDisk() + } else { + // This is a one-off and a little messy but once WSL switches + // to use OCI disk artifacts, we can make all the conditionals cleaner. + testDiskProvider := testProvider.VMType() + if testDiskProvider == define.LibKrun { + testDiskProvider = define.AppleHvVirt // libkrun uses the applehv image for testing } + pullError = pullOCITestDisk(tmpDir, testDiskProvider) } - - if downloadLocation == "" { - Fail("machine tests require a file reference to a disk image right now") - } - - var compressionExtension string - switch testProvider.VMType() { - case define.AppleHvVirt: - compressionExtension = ".gz" - case define.HyperVVirt: - compressionExtension = ".zip" - default: - compressionExtension = ".xz" + if pullError != nil { + Fail(fmt.Sprintf("failed to pull disk: %q", pullError)) } - - suiteImageName = strings.TrimSuffix(path.Base(downloadLocation), compressionExtension) - fqImageName = filepath.Join(tmpDir, suiteImageName) - if _, err := os.Stat(fqImageName); err != nil { - if os.IsNotExist(err) { - getMe, err := url2.Parse(downloadLocation) - if err != nil { - Fail(fmt.Sprintf("unable to create url for download: %q", err)) - } - now := time.Now() - if err := machine.DownloadVMImage(getMe, suiteImageName, fqImageName+compressionExtension); err != nil { - Fail(fmt.Sprintf("unable to download machine image: %q", err)) - } - GinkgoWriter.Println("Download took: ", time.Since(now).String()) - diskImage, err := define.NewMachineFile(fqImageName+compressionExtension, nil) - if err != nil { - Fail(fmt.Sprintf("unable to create vmfile %q: %v", fqImageName+compressionExtension, err)) - } - compressionStart := time.Now() - if err := compression.Decompress(diskImage, fqImageName); err != nil { - Fail(fmt.Sprintf("unable to decompress image file: %q", err)) - } - GinkgoWriter.Println("compression took: ", time.Since(compressionStart)) - } else { - Fail(fmt.Sprintf("unable to check for cache image: %q", err)) - } - } - }) var _ = SynchronizedAfterSuite(func() {}, func() {}) func setup() (string, *machineTestBuilder) { // Set TMPDIR if this needs a new directory - homeDir, err := os.MkdirTemp("", "podman_test") + if value, ok := os.LookupEnv("TMPDIR"); ok { + var err error + tmpDir, err = setTmpDir(value) + if err != nil { + Fail(fmt.Sprintf("failed to set TMPDIR: %q", err)) + } + } + homeDir, err := os.MkdirTemp(tmpDir, "podman_test") if err != nil { Fail(fmt.Sprintf("failed to create home directory: %q", err)) } @@ -157,6 +117,9 @@ func setup() (string, *machineTestBuilder) { if err := os.Unsetenv("SSH_AUTH_SOCK"); err != nil { Fail("unable to unset SSH_AUTH_SOCK") } + if err := os.Setenv("PODMAN_CONNECTIONS_CONF", filepath.Join(homeDir, "connections.json")); err != nil { + Fail("failed to set PODMAN_CONNECTIONS_CONF") + } mb, err := newMB() if err != nil { Fail(fmt.Sprintf("failed to create machine test: %q", err)) @@ -170,37 +133,11 @@ func setup() (string, *machineTestBuilder) { Fail(fmt.Sprintf("failed to close src reader %q: %q", src.Name(), err)) } }() - 
mb.imagePath = filepath.Join(homeDir, suiteImageName) - dest, err := os.Create(mb.imagePath) - if err != nil { - Fail(fmt.Sprintf("failed to create file %s: %q", mb.imagePath, err)) - } - defer func() { - if err := dest.Close(); err != nil { - Fail(fmt.Sprintf("failed to close destination file %q: %q", dest.Name(), err)) - } - }() - fmt.Printf("--> copying %q to %q/n", src.Name(), dest.Name()) - if runtime.GOOS != "darwin" { - if _, err := io.Copy(dest, src); err != nil { - Fail(fmt.Sprintf("failed to copy %ss to %s: %q", fqImageName, mb.imagePath, err)) - } - } else { - if _, err := crcOs.CopySparse(dest, src); err != nil { - Fail(fmt.Sprintf("failed to copy %q to %q: %q", src.Name(), dest.Name(), err)) - } - } + mb.imagePath = fqImageName return homeDir, mb } -func teardown(origHomeDir string, testDir string, mb *machineTestBuilder) { - r := new(rmMachine) - for _, name := range mb.names { - if _, err := mb.setName(name).setCmd(r.withForce()).run(); err != nil { - GinkgoWriter.Printf("error occurred rm'ing machine: %q\n", err) - } - } - +func teardown(origHomeDir string, testDir string) { if err := utils.GuardedRemoveAll(testDir); err != nil { Fail(fmt.Sprintf("failed to remove test dir: %q", err)) } @@ -214,3 +151,46 @@ func teardown(origHomeDir string, testDir string, mb *machineTestBuilder) { } } } + +var ( + mb *machineTestBuilder + testDir string +) + +var _ = BeforeEach(func() { + testDir, mb = setup() + DeferCleanup(func() { + teardown(originalHomeDir, testDir) + }) +}) + +func setTmpDir(value string) (string, error) { + switch { + case runtime.GOOS != "darwin": + tmpDir = value + case len(value) >= 22: + return "", errors.New(value + " path length should be less than 22 characters") + case value == "": + return "", errors.New("TMPDIR cannot be empty. Set to directory mounted on podman machine (e.g. /private/tmp)") + default: + cfg, err := config.Default() + if err != nil { + return "", err + } + volumes := cfg.Machine.Volumes.Get() + containsPath := false + for _, volume := range volumes { + parts := strings.Split(volume, ":") + hostPath := parts[0] + if strings.Contains(value, hostPath) { + containsPath = true + break + } + } + if !containsPath { + return "", fmt.Errorf("%s cannot be used. Change to directory mounted on podman machine (e.g. /private/tmp)", value) + } + tmpDir = value + } + return tmpDir, nil +} diff --git a/pkg/machine/e2e/machine_wsl_test.go b/pkg/machine/e2e/machine_wsl_test.go new file mode 100644 index 0000000000..009e79934e --- /dev/null +++ b/pkg/machine/e2e/machine_wsl_test.go @@ -0,0 +1,55 @@ +package e2e_test + +import ( + "errors" + "fmt" + url2 "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/compression" + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/wsl" + . 
"github.com/onsi/ginkgo/v2" +) + +func pullWSLDisk() error { + downloadLocation := os.Getenv("MACHINE_IMAGE") + if downloadLocation == "" { + dl, _, _, _, err := wsl.GetFedoraDownloadForWSL() + if err != nil { + return errors.New("unable to determine WSL download") + } + downloadLocation = dl.String() + } + + if downloadLocation == "" { + return errors.New("machine tests require a file reference to a disk image right now") + } + compressionExtension := ".zst" + suiteImageName = strings.TrimSuffix(path.Base(downloadLocation), compressionExtension) + fqImageName = filepath.Join(tmpDir, suiteImageName) + getMe, err := url2.Parse(downloadLocation) + if err != nil { + return fmt.Errorf("unable to create url for download: %q", err) + } + now := time.Now() + if err := machine.DownloadVMImage(getMe, suiteImageName, fqImageName+compressionExtension); err != nil { + return fmt.Errorf("unable to download machine image: %q", err) + } + GinkgoWriter.Println("Download took: ", time.Since(now).String()) + diskImage, err := define.NewMachineFile(fqImageName+compressionExtension, nil) + if err != nil { + return fmt.Errorf("unable to create vmfile %q: %v", fqImageName+compressionExtension, err) + } + compressionStart := time.Now() + if err := compression.Decompress(diskImage, fqImageName); err != nil { + return fmt.Errorf("unable to decompress image file: %q", err) + } + GinkgoWriter.Println("compression took: ", time.Since(compressionStart)) + return nil +} diff --git a/pkg/machine/e2e/os_test.go b/pkg/machine/e2e/os_test.go index 65de1b1a33..9fd907dde9 100644 --- a/pkg/machine/e2e/os_test.go +++ b/pkg/machine/e2e/os_test.go @@ -7,21 +7,10 @@ package e2e_test // ) // var _ = Describe("podman machine os apply", func() { -// var ( -// mb *machineTestBuilder -// testDir string -// ) - -// BeforeEach(func() { -// testDir, mb = setup() -// }) -// AfterEach(func() { -// teardown(originalHomeDir, testDir, mb) -// }) // It("apply machine", func() { // i := new(initMachine) -// foo1, err := mb.setName("foo1").setCmd(i.withImagePath(mb.imagePath)).run() +// foo1, err := mb.setName("foo1").setCmd(i.withImage(mb.imagePath)).run() // Expect(err).ToNot(HaveOccurred()) // Expect(foo1).To(Exit(0)) @@ -33,7 +22,7 @@ package e2e_test // It("apply machine from containers-storage", func() { // i := new(initMachine) -// foo1, err := mb.setName("foo1").setCmd(i.withImagePath(mb.imagePath)).run() +// foo1, err := mb.setName("foo1").setCmd(i.withImage(mb.imagePath)).run() // Expect(err).ToNot(HaveOccurred()) // Expect(foo1).To(Exit(0)) diff --git a/pkg/machine/e2e/proxy_test.go b/pkg/machine/e2e/proxy_test.go index eb48e40f99..d539e95fb9 100644 --- a/pkg/machine/e2e/proxy_test.go +++ b/pkg/machine/e2e/proxy_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "os" + "path/filepath" "github.com/containers/podman/v5/pkg/machine/define" . "github.com/onsi/ginkgo/v2" @@ -10,42 +11,32 @@ import ( ) var _ = Describe("podman machine proxy settings propagation", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("ssh to running machine and check proxy settings", func() { - // TODO the proxy test is currently failing on applehv. 
FIX ME - skipIfVmtype(define.AppleHvVirt, "TODO: this test fails on applehv") + defer func() { + os.Unsetenv("HTTP_PROXY") + os.Unsetenv("HTTPS_PROXY") + os.Unsetenv("SSL_CERT_DIR") + os.Unsetenv("SSL_CERT_FILE") + }() + + certFileDir := GinkgoT().TempDir() + certDir := GinkgoT().TempDir() + certFile := filepath.Join(certFileDir, "cert1") + err := os.WriteFile(certFile, []byte("cert1 content\n"), os.ModePerm) + Expect(err).ToNot(HaveOccurred()) + err = os.WriteFile(filepath.Join(certDir, "cert2"), []byte("cert2 content\n"), os.ModePerm) + Expect(err).ToNot(HaveOccurred()) + + os.Setenv("SSL_CERT_FILE", certFile) + os.Setenv("SSL_CERT_DIR", certDir) - // https://github.com/containers/podman/issues/20129 - if testProvider.VMType() == define.HyperVVirt { - Skip("proxy settings not yet supported") - } name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) - defer func() { - httpProxyEnv := os.Getenv("HTTP_PROXY") - httpsProxyEnv := os.Getenv("HTTPS_PROXY") - if httpProxyEnv != "" { - os.Unsetenv("HTTP_PROXY") - } - if httpsProxyEnv != "" { - os.Unsetenv("HTTPS_PROXY") - } - }() proxyURL := "http://abcdefghijklmnopqrstuvwxyz-proxy" os.Setenv("HTTP_PROXY", proxyURL) os.Setenv("HTTPS_PROXY", proxyURL) @@ -65,5 +56,52 @@ var _ = Describe("podman machine proxy settings propagation", func() { Expect(err).ToNot(HaveOccurred()) Expect(sshSession).To(Exit(0)) Expect(sshSession.outputToString()).To(ContainSubstring(proxyURL)) + + // SSL_CERT not implemented for WSL + if !isVmtype(define.WSLVirt) { + sshSession, err = mb.setName(name).setCmd(sshProxy.withSSHCommand([]string{"printenv", "SSL_CERT_DIR", "SSL_CERT_FILE"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(sshSession).To(Exit(0)) + Expect(string(sshSession.Out.Contents())).To(Equal(define.UserCertsTargetPath + "\n" + define.UserCertsTargetPath + "/cert1" + "\n")) + + sshSession, err = mb.setName(name).setCmd(sshProxy.withSSHCommand([]string{"cat", "$SSL_CERT_DIR/cert2", "$SSL_CERT_FILE"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(sshSession).To(Exit(0)) + Expect(string(sshSession.Out.Contents())).To(Equal("cert2 content\ncert1 content\n")) + } + + stop := new(stopMachine) + stopSession, err := mb.setName(name).setCmd(stop).run() + Expect(err).ToNot(HaveOccurred()) + Expect(stopSession).To(Exit(0)) + + // Now update proxy env, lets use some special vars to make sure our scripts can handle it + proxy1 := "http:// some special @;\" here" + proxy2 := "https://abc :£$%6 : |\"\"" + os.Setenv("HTTP_PROXY", proxy1) + os.Setenv("HTTPS_PROXY", proxy2) + + // changing SSL_CERT vars should not have an effect + os.Setenv("SSL_CERT_FILE", "/tmp/1") + os.Setenv("SSL_CERT_DIR", "/tmp") + + // start it again should update the proxies + startSession, err = mb.setName(name).setCmd(s).run() + Expect(err).ToNot(HaveOccurred()) + Expect(startSession).To(Exit(0)) + + sshSession, err = mb.setName(name).setCmd(sshProxy.withSSHCommand([]string{"printenv", "HTTP_PROXY", "HTTPS_PROXY"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(sshSession).To(Exit(0)) + Expect(string(sshSession.Out.Contents())).To(Equal(proxy1 + "\n" + proxy2 + "\n")) + + // SSL_CERT not implemented for WSL + if !isVmtype(define.WSLVirt) { + // SSL_CERT... 
must still be the same as before + sshSession, err = mb.setName(name).setCmd(sshProxy.withSSHCommand([]string{"cat", "$SSL_CERT_DIR/cert2", "$SSL_CERT_FILE"})).run() + Expect(err).ToNot(HaveOccurred()) + Expect(sshSession).To(Exit(0)) + Expect(string(sshSession.Out.Contents())).To(Equal("cert2 content\ncert1 content\n")) + } }) }) diff --git a/pkg/machine/e2e/reset_test.go b/pkg/machine/e2e/reset_test.go index 655a00efdd..f997bf864c 100644 --- a/pkg/machine/e2e/reset_test.go +++ b/pkg/machine/e2e/reset_test.go @@ -7,17 +7,6 @@ import ( ) var _ = Describe("podman machine reset", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("starting from scratch should not error", func() { i := resetMachine{} @@ -29,7 +18,7 @@ var _ = Describe("podman machine reset", func() { It("reset machine with one defined machine", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -53,7 +42,7 @@ var _ = Describe("podman machine reset", func() { It("reset with running machine and other machines idle ", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -65,7 +54,7 @@ var _ = Describe("podman machine reset", func() { name2 := randomString() i2 := new(initMachine) - session2, err := mb.setName(name2).setCmd(i2.withImagePath(mb.imagePath)).run() + session2, err := mb.setName(name2).setCmd(i2.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session2).To(Exit(0)) diff --git a/pkg/machine/e2e/rm_test.go b/pkg/machine/e2e/rm_test.go index 1e27863c40..210e6ed09f 100644 --- a/pkg/machine/e2e/rm_test.go +++ b/pkg/machine/e2e/rm_test.go @@ -12,17 +12,6 @@ import ( ) var _ = Describe("podman machine rm", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("bad init name", func() { i := rmMachine{} @@ -35,7 +24,7 @@ var _ = Describe("podman machine rm", func() { It("Remove machine", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) rm := rmMachine{} @@ -59,7 +48,7 @@ var _ = Describe("podman machine rm", func() { It("Remove running machine", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) rm := new(rmMachine) @@ -83,7 +72,7 @@ var _ = Describe("podman machine rm", func() { It("machine rm --save-ignition --save-image", func() { i := new(initMachine) - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) 
Expect(session).To(Exit(0)) @@ -94,16 +83,6 @@ var _ = Describe("podman machine rm", func() { key := inspectSession.outputToString() pubkey := key + ".pub" - inspect = inspect.withFormat("{{.Image.IgnitionFile.Path}}") - inspectSession, err = mb.setCmd(inspect).run() - Expect(err).ToNot(HaveOccurred()) - ign := inspectSession.outputToString() - - inspect = inspect.withFormat("{{.Image.ImagePath.Path}}") - inspectSession, err = mb.setCmd(inspect).run() - Expect(err).ToNot(HaveOccurred()) - img := inspectSession.outputToString() - rm := rmMachine{} removeSession, err := mb.setCmd(rm.withForce().withSaveIgnition().withSaveImage()).run() Expect(err).ToNot(HaveOccurred()) @@ -122,10 +101,11 @@ var _ = Describe("podman machine rm", func() { // WSL does not use ignition if testProvider.VMType() != define.WSLVirt { - _, err = os.Stat(ign) + ignPath := filepath.Join(testDir, ".config", "containers", "podman", "machine", testProvider.VMType().String(), mb.name+".ign") + _, err = os.Stat(ignPath) Expect(err).ToNot(HaveOccurred()) } - _, err = os.Stat(img) + _, err = os.Stat(mb.imagePath) Expect(err).ToNot(HaveOccurred()) }) @@ -134,13 +114,13 @@ var _ = Describe("podman machine rm", func() { fooName := "foo" foo := new(initMachine) - session, err := mb.setName(fooName).setCmd(foo.withImagePath(mb.imagePath)).run() + session, err := mb.setName(fooName).setCmd(foo.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) barName := "bar" bar := new(initMachine) - session, err = mb.setName(barName).setCmd(bar.withImagePath(mb.imagePath).withNow()).run() + session, err = mb.setName(barName).setCmd(bar.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -174,7 +154,7 @@ var _ = Describe("podman machine rm", func() { It("Removing all machines doesn't delete ssh keys", func() { fooName := "foo" foo := new(initMachine) - session, err := mb.setName(fooName).setCmd(foo.withImagePath(mb.imagePath)).run() + session, err := mb.setName(fooName).setCmd(foo.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) diff --git a/pkg/machine/e2e/set_test.go b/pkg/machine/e2e/set_test.go index f1aa5b4771..0becff0083 100644 --- a/pkg/machine/e2e/set_test.go +++ b/pkg/machine/e2e/set_test.go @@ -13,26 +13,20 @@ import ( ) var _ = Describe("podman machine set", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("set machine cpus, disk, memory", func() { skipIfWSL("WSL cannot change set properties of disk, processor, or memory") name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) + setMem := setMachine{} + SetMemSession, err := mb.setName(name).setCmd(setMem.withMemory(524288)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(SetMemSession).To(Exit(125)) + set := setMachine{} setSession, err := mb.setName(name).setCmd(set.withCPUs(2).withDiskSize(102).withMemory(4096)).run() Expect(err).ToNot(HaveOccurred()) @@ -79,7 +73,7 @@ var _ = Describe("podman machine set", func() { skipIfNotVmtype(define.WSLVirt, "tests are only for WSL provider") name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + 
session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -104,7 +98,7 @@ var _ = Describe("podman machine set", func() { skipIfWSL("WSL cannot change set properties of disk, processor, or memory") name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -138,7 +132,7 @@ var _ = Describe("podman machine set", func() { It("set rootful with docker sock change", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -176,7 +170,7 @@ var _ = Describe("podman machine set", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -196,7 +190,7 @@ var _ = Describe("podman machine set", func() { It("set while machine already running", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) diff --git a/pkg/machine/e2e/ssh_test.go b/pkg/machine/e2e/ssh_test.go index 6bd6268454..d360fd48d0 100644 --- a/pkg/machine/e2e/ssh_test.go +++ b/pkg/machine/e2e/ssh_test.go @@ -8,17 +8,6 @@ import ( ) var _ = Describe("podman machine ssh", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("bad machine name", func() { name := randomString() @@ -32,7 +21,7 @@ var _ = Describe("podman machine ssh", func() { It("ssh to non-running machine", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) @@ -47,7 +36,7 @@ var _ = Describe("podman machine ssh", func() { wsl := testProvider.VMType() == define.WSLVirt name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) diff --git a/pkg/machine/e2e/start_test.go b/pkg/machine/e2e/start_test.go index 206a1d6cc4..efd65311db 100644 --- a/pkg/machine/e2e/start_test.go +++ b/pkg/machine/e2e/start_test.go @@ -1,6 +1,10 @@ package e2e_test import ( + "fmt" + "net" + "net/url" + "sync" "time" "github.com/containers/podman/v5/pkg/machine/define" @@ -10,20 +14,10 @@ import ( ) var _ = Describe("podman machine start", func() { - var ( - mb *machineTestBuilder - testDir string - ) - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("start simple machine", func() { i := new(initMachine) - session, err := 
mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) s := new(startMachine) @@ -68,7 +62,7 @@ var _ = Describe("podman machine start", func() { It("start machine already started", func() { i := new(initMachine) - session, err := mb.setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) s := new(startMachine) @@ -87,16 +81,65 @@ var _ = Describe("podman machine start", func() { Expect(startSession.errorToString()).To(ContainSubstring("VM already running or starting")) }) + It("start machine with conflict on SSH port", func() { + i := new(initMachine) + session, err := mb.setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + inspect := new(inspectMachine) + inspectSession, err := mb.setCmd(inspect.withFormat("{{.SSHConfig.Port}}")).run() + Expect(err).ToNot(HaveOccurred()) + Expect(inspectSession).To(Exit(0)) + inspectPort := inspectSession.outputToString() + + connections := new(listSystemConnection) + connectionsSession, err := mb.setCmd(connections.withFormat("{{.URI}}")).run() + Expect(err).ToNot(HaveOccurred()) + Expect(connectionsSession).To(Exit(0)) + connectionURLs := connectionsSession.outputToStringSlice() + connectionPorts, err := mapToPort(connectionURLs) + Expect(err).ToNot(HaveOccurred()) + Expect(connectionPorts).To(HaveEach(inspectPort)) + + // start a listener on the ssh port + listener, err := net.Listen("tcp", "127.0.0.1:"+inspectPort) + Expect(err).ToNot(HaveOccurred()) + defer listener.Close() + + s := new(startMachine) + startSession, err := mb.setCmd(s).run() + Expect(err).ToNot(HaveOccurred()) + Expect(startSession).To(Exit(0)) + Expect(startSession.errorToString()).To(ContainSubstring("detected port conflict on machine ssh port")) + + inspect2 := new(inspectMachine) + inspectSession2, err := mb.setCmd(inspect2.withFormat("{{.SSHConfig.Port}}")).run() + Expect(err).ToNot(HaveOccurred()) + Expect(inspectSession2).To(Exit(0)) + inspectPort2 := inspectSession2.outputToString() + Expect(inspectPort2).To(Not(Equal(inspectPort))) + + connections2 := new(listSystemConnection) + connectionsSession2, err := mb.setCmd(connections2.withFormat("{{.URI}}")).run() + Expect(err).ToNot(HaveOccurred()) + Expect(connectionsSession2).To(Exit(0)) + connectionURLs2 := connectionsSession2.outputToStringSlice() + connectionPorts2, err := mapToPort(connectionURLs2) + Expect(err).ToNot(HaveOccurred()) + Expect(connectionPorts2).To(HaveEach(inspectPort2)) + }) + It("start only starts specified machine", func() { i := initMachine{} startme := randomString() - session, err := mb.setName(startme).setCmd(i.withImagePath(mb.imagePath)).run() + session, err := mb.setName(startme).setCmd(i.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) j := initMachine{} dontstartme := randomString() - session2, err := mb.setName(dontstartme).setCmd(j.withImagePath(mb.imagePath)).run() + session2, err := mb.setName(dontstartme).setCmd(j.withImage(mb.imagePath)).run() Expect(err).ToNot(HaveOccurred()) Expect(session2).To(Exit(0)) @@ -119,4 +162,77 @@ var _ = Describe("podman machine start", func() { Expect(inspectSession2).To(Exit(0)) Expect(inspectSession2.outputToString()).To(Not(Equal(define.Running))) }) + + It("start two machines in parallel", func() { + i := initMachine{} + machine1 := 
"m1-" + randomString() + session, err := mb.setName(machine1).setCmd(i.withImage(mb.imagePath)).run() + Expect(err).ToNot(HaveOccurred()) + Expect(session).To(Exit(0)) + + machine2 := "m2-" + randomString() + session, err = mb.setName(machine2).setCmd(i.withImage(mb.imagePath)).run() + Expect(session).To(Exit(0)) + + var startSession1, startSession2 *machineSession + wg := sync.WaitGroup{} + wg.Add(2) + // now start two machine start process in parallel + go func() { + defer GinkgoRecover() + defer wg.Done() + s := startMachine{} + startSession1, err = mb.setName(machine1).setCmd(s).setTimeout(time.Minute * 10).run() + Expect(err).ToNot(HaveOccurred()) + }() + go func() { + defer GinkgoRecover() + defer wg.Done() + s := startMachine{} + // ok this is a hack and should not be needed but the way these test are setup they all + // share "mb" which stores the name that is used for the VM, thus running two parallel + // can overwrite the name from the other, work around that by creating a new mb for the + // second run. + nmb, err := newMB() + Expect(err).ToNot(HaveOccurred()) + startSession2, err = nmb.setName(machine2).setCmd(s).setTimeout(time.Minute * 10).run() + Expect(err).ToNot(HaveOccurred()) + }() + wg.Wait() + + // WSL can start in parallel so just check both command exit 0 there + if testProvider.VMType() == define.WSLVirt { + Expect(startSession1).To(Exit(0)) + Expect(startSession2).To(Exit(0)) + return + } + // other providers have a check that only one VM can be running at any given time so make sure our check is race free + Expect(startSession1).To(Or(Exit(0), Exit(125)), "start command should succeed or fail with 125") + if startSession1.ExitCode() == 0 { + Expect(startSession2).To(Exit(125), "first start worked, second start must fail") + Expect(startSession2.errorToString()).To(ContainSubstring("machine %s: VM already running or starting", machine1)) + } else { + Expect(startSession2).To(Exit(0), "first start failed, second start succeed") + Expect(startSession1.errorToString()).To(ContainSubstring("machine %s: VM already running or starting", machine2)) + } + }) }) + +func mapToPort(uris []string) ([]string, error) { + ports := []string{} + + for _, uri := range uris { + u, err := url.Parse(uri) + if err != nil { + return nil, err + } + + port := u.Port() + if port == "" { + return nil, fmt.Errorf("no port in URI: %s", uri) + } + + ports = append(ports, port) + } + return ports, nil +} diff --git a/pkg/machine/e2e/stop_test.go b/pkg/machine/e2e/stop_test.go index 11c308e51b..780858344d 100644 --- a/pkg/machine/e2e/stop_test.go +++ b/pkg/machine/e2e/stop_test.go @@ -10,17 +10,6 @@ import ( ) var _ = Describe("podman machine stop", func() { - var ( - mb *machineTestBuilder - testDir string - ) - - BeforeEach(func() { - testDir, mb = setup() - }) - AfterEach(func() { - teardown(originalHomeDir, testDir, mb) - }) It("stop bad name", func() { i := stopMachine{} @@ -34,7 +23,7 @@ var _ = Describe("podman machine stop", func() { name := randomString() i := new(initMachine) starttime := time.Now() - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withNow()).run() + session, err := mb.setName(name).setCmd(i.withImage(mb.imagePath).withNow()).run() Expect(err).ToNot(HaveOccurred()) Expect(session).To(Exit(0)) diff --git a/pkg/machine/env/dir.go b/pkg/machine/env/dir.go new file mode 100644 index 0000000000..18dda22998 --- /dev/null +++ b/pkg/machine/env/dir.go @@ -0,0 +1,160 @@ +package env + +import ( + "errors" + "os" + "path/filepath" + "strings" + + 
"github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/homedir" +) + +// GetCacheDir returns the dir where VM images are downloaded into when pulled +func GetCacheDir(vmType define.VMType) (string, error) { + dataDir, err := GetDataDir(vmType) + if err != nil { + return "", err + } + cacheDir := filepath.Join(dataDir, "cache") + if err := fileutils.Exists(cacheDir); !errors.Is(err, os.ErrNotExist) { + return cacheDir, nil + } + return cacheDir, os.MkdirAll(cacheDir, 0755) +} + +// GetDataDir returns the filepath where vm images should +// live for podman-machine. +func GetDataDir(vmType define.VMType) (string, error) { + dataDirPrefix, err := DataDirPrefix() + if err != nil { + return "", err + } + dataDir := filepath.Join(dataDirPrefix, vmType.String()) + if err := fileutils.Exists(dataDir); !errors.Is(err, os.ErrNotExist) { + return dataDir, nil + } + mkdirErr := os.MkdirAll(dataDir, 0755) + return dataDir, mkdirErr +} + +// GetGlobalDataDir returns the root of all backends +// for shared machine data. +func GetGlobalDataDir() (string, error) { + dataDir, err := DataDirPrefix() + if err != nil { + return "", err + } + + return dataDir, os.MkdirAll(dataDir, 0755) +} + +func GetMachineDirs(vmType define.VMType) (*define.MachineDirs, error) { + rtDir, err := getRuntimeDir() + if err != nil { + return nil, err + } + + rtDir = filepath.Join(rtDir, "podman") + configDir, err := GetConfDir(vmType) + if err != nil { + return nil, err + } + + configDirFile, err := define.NewMachineFile(configDir, nil) + if err != nil { + return nil, err + } + dataDir, err := GetDataDir(vmType) + if err != nil { + return nil, err + } + + dataDirFile, err := define.NewMachineFile(dataDir, nil) + if err != nil { + return nil, err + } + + imageCacheDir, err := dataDirFile.AppendToNewVMFile("cache", nil) + if err != nil { + return nil, err + } + + rtDirFile, err := define.NewMachineFile(rtDir, nil) + if err != nil { + return nil, err + } + + dirs := define.MachineDirs{ + ConfigDir: configDirFile, + DataDir: dataDirFile, + ImageCacheDir: imageCacheDir, + RuntimeDir: rtDirFile, + } + + // make sure all machine dirs are present + if err := os.MkdirAll(rtDir, 0755); err != nil { + return nil, err + } + if err := os.MkdirAll(configDir, 0755); err != nil { + return nil, err + } + + // Because this is a mkdirall, we make the image cache dir + // which is a subdir of datadir (so the datadir is made anyway) + err = os.MkdirAll(imageCacheDir.GetPath(), 0755) + + return &dirs, err +} + +// DataDirPrefix returns the path prefix for all machine data files +func DataDirPrefix() (string, error) { + data, err := homedir.GetDataHome() + if err != nil { + return "", err + } + dataDir := filepath.Join(data, "containers", "podman", "machine") + return dataDir, nil +} + +// GetConfigDir returns the filepath to where configuration +// files for podman-machine should live +func GetConfDir(vmType define.VMType) (string, error) { + confDirPrefix, err := ConfDirPrefix() + if err != nil { + return "", err + } + confDir := filepath.Join(confDirPrefix, vmType.String()) + if err := fileutils.Exists(confDir); !errors.Is(err, os.ErrNotExist) { + return confDir, nil + } + mkdirErr := os.MkdirAll(confDir, 0755) + return confDir, mkdirErr +} + +// ConfDirPrefix returns the path prefix for all machine config files +func ConfDirPrefix() (string, error) { + conf, err := homedir.GetConfigHome() + if err != nil { + return "", err + } + confDir := filepath.Join(conf, 
"containers", "podman", "machine") + return confDir, nil +} + +// GetSSHIdentityPath returns the path to the expected SSH private key +func GetSSHIdentityPath(name string) (string, error) { + datadir, err := GetGlobalDataDir() + if err != nil { + return "", err + } + return filepath.Join(datadir, name), nil +} + +func WithPodmanPrefix(name string) string { + if !strings.HasPrefix(name, "podman") { + name = "podman-" + name + } + return name +} diff --git a/pkg/machine/env/dir_darwin.go b/pkg/machine/env/dir_darwin.go new file mode 100644 index 0000000000..ed72aec8ab --- /dev/null +++ b/pkg/machine/env/dir_darwin.go @@ -0,0 +1,11 @@ +package env + +import "os" + +func getRuntimeDir() (string, error) { + tmpDir, ok := os.LookupEnv("TMPDIR") + if !ok { + tmpDir = "/tmp" + } + return tmpDir, nil +} diff --git a/pkg/machine/env/dir_freebsd.go b/pkg/machine/env/dir_freebsd.go new file mode 100644 index 0000000000..ed72aec8ab --- /dev/null +++ b/pkg/machine/env/dir_freebsd.go @@ -0,0 +1,11 @@ +package env + +import "os" + +func getRuntimeDir() (string, error) { + tmpDir, ok := os.LookupEnv("TMPDIR") + if !ok { + tmpDir = "/tmp" + } + return tmpDir, nil +} diff --git a/pkg/machine/env/dir_linux.go b/pkg/machine/env/dir_linux.go new file mode 100644 index 0000000000..67f8142440 --- /dev/null +++ b/pkg/machine/env/dir_linux.go @@ -0,0 +1,13 @@ +package env + +import ( + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" +) + +func getRuntimeDir() (string, error) { + if !rootless.IsRootless() { + return "/run", nil + } + return util.GetRootlessRuntimeDir() +} diff --git a/pkg/machine/env/dir_windows.go b/pkg/machine/env/dir_windows.go new file mode 100644 index 0000000000..9a272568ca --- /dev/null +++ b/pkg/machine/env/dir_windows.go @@ -0,0 +1,11 @@ +package env + +import "os" + +func getRuntimeDir() (string, error) { + tmpDir, ok := os.LookupEnv("TEMP") + if !ok { + tmpDir = os.Getenv("LOCALAPPDATA") + "\\Temp" + } + return tmpDir, nil +} diff --git a/pkg/machine/fedora_windows.go b/pkg/machine/fedora_windows.go index a3cd7f0328..1b91b006fe 100644 --- a/pkg/machine/fedora_windows.go +++ b/pkg/machine/fedora_windows.go @@ -14,7 +14,7 @@ func DetermineMachineArch() string { current, _ := syscall.GetCurrentProcess() if err := windows.IsWow64Process2(windows.Handle(current), &machine, &native); err != nil { - logrus.Warnf("Failure detecting native system architecture, %s: %w", fallbackMsg, err) + logrus.Warnf("Failure detecting native system architecture, %s: %v", fallbackMsg, err) // Fall-back to binary arch return runtime.GOARCH } @@ -26,7 +26,7 @@ func DetermineMachineArch() string { case 0x8664: return "amd64" default: - logrus.Warnf("Unknown or unsupported native system architecture [%d], %s", fallbackMsg) + logrus.Warnf("Unknown or unsupported native system architecture [%d], %s", native, fallbackMsg) return runtime.GOARCH } } diff --git a/pkg/machine/gvproxy.go b/pkg/machine/gvproxy.go index 6a008d22d4..fc236038eb 100644 --- a/pkg/machine/gvproxy.go +++ b/pkg/machine/gvproxy.go @@ -1,45 +1,24 @@ package machine import ( + "errors" "fmt" + "io/fs" "strconv" - "time" "github.com/containers/podman/v5/pkg/machine/define" - psutil "github.com/shirou/gopsutil/v3/process" ) -const ( - loops = 8 - sleepTime = time.Millisecond * 1 -) - -// backoffForProcess checks if the process still exists, for something like -// sigterm. 
If the process still exists after loops and sleep time are exhausted, -// an error is returned -func backoffForProcess(p *psutil.Process) error { - sleepInterval := sleepTime - for i := 0; i < loops; i++ { - running, err := p.IsRunning() - if err != nil { - return fmt.Errorf("checking if process running: %w", err) - } - if !running { - return nil - } - - time.Sleep(sleepInterval) - // double the time - sleepInterval += sleepInterval - } - return fmt.Errorf("process %d has not ended", p.Pid) -} - // CleanupGVProxy reads the --pid-file for gvproxy attempts to stop it func CleanupGVProxy(f define.VMFile) error { gvPid, err := f.Read() if err != nil { - return fmt.Errorf("unable to read gvproxy pid file %s: %v", f.GetPath(), err) + // The file will also be removed by gvproxy when it exits so + // we need to account for the race and can just ignore it here. + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return fmt.Errorf("unable to read gvproxy pid file: %v", err) } proxyPid, err := strconv.Atoi(string(gvPid)) if err != nil { diff --git a/pkg/machine/gvproxy_unix.go b/pkg/machine/gvproxy_unix.go index 431e34740f..20869d583e 100644 --- a/pkg/machine/gvproxy_unix.go +++ b/pkg/machine/gvproxy_unix.go @@ -6,11 +6,44 @@ import ( "errors" "fmt" "syscall" + "time" psutil "github.com/shirou/gopsutil/v3/process" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) +const ( + loops = 8 + sleepTime = time.Millisecond * 1 +) + +// backoffForProcess checks if the process still exists, for something like +// sigterm. If the process still exists after loops and sleep time are exhausted, +// an error is returned +func backoffForProcess(p *psutil.Process) error { + sleepInterval := sleepTime + for i := 0; i < loops; i++ { + running, err := p.IsRunning() + if err != nil { + // It is possible that while in our loop, the PID vaporize triggering + // an input/output error (#21845) + if errors.Is(err, unix.EIO) { + return nil + } + return fmt.Errorf("checking if process running: %w", err) + } + if !running { + return nil + } + + time.Sleep(sleepInterval) + // double the time + sleepInterval += sleepInterval + } + return fmt.Errorf("process %d has not ended", p.Pid) +} + // / waitOnProcess takes a pid and sends a sigterm to it. it then waits for the // process to not exist. if the sigterm does not end the process after an interval, // then sigkill is sent. it also waits for the process to exit after the sigkill too. 
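The doubling of sleepInterval above gives the shutdown wait an exponential backoff: with loops = 8 and sleepTime = 1ms the loop sleeps 1+2+4+...+128 = at most 255ms before reporting that the process has not ended. A minimal standalone sketch of the same pattern, using a hypothetical waitWithBackoff helper and condition function rather than the psutil-based code in this patch:

package main

import (
	"fmt"
	"time"
)

// waitWithBackoff polls cond() up to maxTries times, doubling the pause
// between attempts. With maxTries = 8 and start = 1ms the worst case is
// 1+2+4+...+128 = 255ms of sleeping, mirroring backoffForProcess above.
func waitWithBackoff(cond func() (bool, error), maxTries int, start time.Duration) error {
	interval := start
	for i := 0; i < maxTries; i++ {
		done, err := cond()
		if err != nil {
			return fmt.Errorf("checking condition: %w", err)
		}
		if done {
			return nil
		}
		time.Sleep(interval)
		interval += interval // double the time, as the patch does
	}
	return fmt.Errorf("condition not met after %d tries", maxTries)
}

func main() {
	// Illustrative condition: "done" once a short deadline has passed.
	deadline := time.Now().Add(10 * time.Millisecond)
	err := waitWithBackoff(func() (bool, error) {
		return time.Now().After(deadline), nil
	}, 8, time.Millisecond)
	fmt.Println(err) // prints <nil> once the deadline has passed
}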
diff --git a/pkg/machine/gvproxy_windows.go b/pkg/machine/gvproxy_windows.go index 75da8ad20d..d0d5f1b725 100644 --- a/pkg/machine/gvproxy_windows.go +++ b/pkg/machine/gvproxy_windows.go @@ -16,6 +16,7 @@ func waitOnProcess(processID int) error { // FindProcess on Windows will return an error when the process is not found // if a process can not be found then it has already exited and there is // nothing left to do, so return without error + //nolint:nilerr return nil } diff --git a/pkg/machine/hyperv/stubber.go b/pkg/machine/hyperv/stubber.go index cf388561d6..11f9e8620a 100644 --- a/pkg/machine/hyperv/stubber.go +++ b/pkg/machine/hyperv/stubber.go @@ -10,16 +10,16 @@ import ( "os/exec" "path/filepath" - "github.com/containers/podman/v5/pkg/machine/shim/diskpull" - "github.com/Microsoft/go-winio" "github.com/containers/common/pkg/strongunits" gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" "github.com/containers/libhvee/pkg/hypervctl" "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/hyperv/vsock" "github.com/containers/podman/v5/pkg/machine/ignition" + "github.com/containers/podman/v5/pkg/machine/shim/diskpull" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/pkg/systemd/parser" "github.com/sirupsen/logrus" @@ -45,15 +45,15 @@ func (h HyperVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineC var ( err error ) - callbackFuncs := machine.InitCleanup() + callbackFuncs := machine.CleanUp() defer callbackFuncs.CleanIfErr(&err) go callbackFuncs.CleanOnSignal() hwConfig := hypervctl.HardwareConfig{ CPUs: uint16(mc.Resources.CPUs), DiskPath: mc.ImagePath.GetPath(), - DiskSize: mc.Resources.DiskSize, - Memory: mc.Resources.Memory, + DiskSize: uint64(mc.Resources.DiskSize), + Memory: uint64(mc.Resources.Memory), } networkHVSock, err := vsock.NewHVSockRegistryEntry(mc.Name, vsock.Network) @@ -119,7 +119,7 @@ func (h HyperVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineC } callbackFuncs.Add(vmRemoveCallback) - err = resizeDisk(strongunits.GiB(mc.Resources.DiskSize), mc.ImagePath) + err = resizeDisk(mc.Resources.DiskSize, mc.ImagePath) return err } @@ -138,9 +138,6 @@ func (h HyperVStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) } func (h HyperVStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, error) { - mc.Lock() - defer mc.Unlock() - _, vm, err := GetVMFromMC(mc) if err != nil { return nil, nil, err @@ -182,7 +179,7 @@ func (h HyperVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func( return nil, nil, err } - callbackFuncs := machine.InitCleanup() + callbackFuncs := machine.CleanUp() defer callbackFuncs.CleanIfErr(&err) go callbackFuncs.CleanOnSignal() @@ -215,8 +212,15 @@ func (h HyperVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func( callbackFuncs.Add(rmIgnCallbackFunc) } + waitReady, listener, err := mc.HyperVHypervisor.ReadyVsock.ListenSetupWait() + if err != nil { + return nil, nil, err + } + err = vm.Start() if err != nil { + // cleanup the pending listener + _ = listener.Close() return nil, nil, err } @@ -225,7 +229,7 @@ func (h HyperVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func( } callbackFuncs.Add(startCallback) - return nil, mc.HyperVHypervisor.ReadyVsock.Listen, err + return nil, waitReady, err } // State is returns the state as a define.status. 
for hyperv, state differs from others because @@ -240,9 +244,6 @@ func (h HyperVStubber) State(mc *vmconfigs.MachineConfig, bypass bool) (define.S } func (h HyperVStubber) StopVM(mc *vmconfigs.MachineConfig, hardStop bool) error { - mc.Lock() - defer mc.Unlock() - vmm := hypervctl.NewVirtualMachineManager() vm, err := vmm.GetMachine(mc.Name) if err != nil { @@ -300,9 +301,6 @@ func (h HyperVStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define cpuChanged, memoryChanged bool ) - mc.Lock() - defer mc.Unlock() - _, vm, err := GetVMFromMC(mc) if err != nil { return err @@ -338,9 +336,10 @@ func (h HyperVStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define }, func(ms *hypervctl.MemorySettings) { if memoryChanged { ms.DynamicMemoryEnabled = false - ms.VirtualQuantity = *opts.Memory - ms.Limit = *opts.Memory - ms.Reservation = *opts.Memory + mem := uint64(*opts.Memory) + ms.VirtualQuantity = mem + ms.Limit = mem + ms.Reservation = mem } }) if err != nil { @@ -377,73 +376,81 @@ func (h HyperVStubber) PostStartNetworking(mc *vmconfigs.MachineConfig, noInfo b err error executable string ) - callbackFuncs := machine.InitCleanup() + callbackFuncs := machine.CleanUp() defer callbackFuncs.CleanIfErr(&err) go callbackFuncs.CleanOnSignal() - if len(mc.Mounts) != 0 { - var ( - dirs *define.MachineDirs - gvproxyPID int - ) - dirs, err = machine.GetMachineDirs(h.VMType()) - if err != nil { - return err - } - // GvProxy PID file path is now derived - gvproxyPIDFile, err := dirs.RuntimeDir.AppendToNewVMFile("gvproxy.pid", nil) - if err != nil { - return err - } - gvproxyPID, err = gvproxyPIDFile.ReadPIDFrom() - if err != nil { - return err - } + if len(mc.Mounts) == 0 { + return nil + } - executable, err = os.Executable() - if err != nil { - return err - } - // Start the 9p server in the background - p9ServerArgs := []string{} - if logrus.IsLevelEnabled(logrus.DebugLevel) { - p9ServerArgs = append(p9ServerArgs, "--log-level=debug") - } - p9ServerArgs = append(p9ServerArgs, "machine", "server9p") + var ( + dirs *define.MachineDirs + gvproxyPID int + ) + dirs, err = env.GetMachineDirs(h.VMType()) + if err != nil { + return err + } + // GvProxy PID file path is now derived + gvproxyPIDFile, err := dirs.RuntimeDir.AppendToNewVMFile("gvproxy.pid", nil) + if err != nil { + return err + } + gvproxyPID, err = gvproxyPIDFile.ReadPIDFrom() + if err != nil { + return err + } - for _, mount := range mc.Mounts { - if mount.VSockNumber == nil { - return fmt.Errorf("mount %s has not vsock port defined", mount.Source) - } - p9ServerArgs = append(p9ServerArgs, "--serve", fmt.Sprintf("%s:%s", mount.Source, winio.VsockServiceID(uint32(*mount.VSockNumber)).String())) + executable, err = os.Executable() + if err != nil { + return err + } + // Start the 9p server in the background + p9ServerArgs := []string{} + if logrus.IsLevelEnabled(logrus.DebugLevel) { + p9ServerArgs = append(p9ServerArgs, "--log-level=debug") + } + p9ServerArgs = append(p9ServerArgs, "machine", "server9p") + + for _, mount := range mc.Mounts { + if mount.VSockNumber == nil { + return fmt.Errorf("mount %s has no vsock port defined", mount.Source) } - p9ServerArgs = append(p9ServerArgs, fmt.Sprintf("%d", gvproxyPID)) + p9ServerArgs = append(p9ServerArgs, "--serve", fmt.Sprintf("%s:%s", mount.Source, winio.VsockServiceID(uint32(*mount.VSockNumber)).String())) + } + p9ServerArgs = append(p9ServerArgs, fmt.Sprintf("%d", gvproxyPID)) - logrus.Debugf("Going to start 9p server using command: %s %v", executable, p9ServerArgs) + 
logrus.Debugf("Going to start 9p server using command: %s %v", executable, p9ServerArgs) - fsCmd := exec.Command(executable, p9ServerArgs...) + fsCmd := exec.Command(executable, p9ServerArgs...) - if logrus.IsLevelEnabled(logrus.DebugLevel) { - err = logCommandToFile(fsCmd, "podman-machine-server9.log") - if err != nil { - return err - } + if logrus.IsLevelEnabled(logrus.DebugLevel) { + err = logCommandToFile(fsCmd, "podman-machine-server9.log") + if err != nil { + return err } + } - err = fsCmd.Start() - if err == nil { - logrus.Infof("Started podman 9p server as PID %d", fsCmd.Process.Pid) - } + err = fsCmd.Start() + if err != nil { + return fmt.Errorf("unable to start 9p server: %v", err) + } + logrus.Infof("Started podman 9p server as PID %d", fsCmd.Process.Pid) - // Note: No callback is needed to stop the 9p server, because it will stop when - // gvproxy stops + // Note: No callback is needed to stop the 9p server, because it will stop when + // gvproxy stops - // Finalize starting shares after we are confident gvproxy is still alive. - err = startShares(mc) - } + // Finalize starting shares after we are confident gvproxy is still alive. + err = startShares(mc) return err } +func (h HyperVStubber) UpdateSSHPort(mc *vmconfigs.MachineConfig, port int) error { + // managed by gvproxy on this backend, so nothing to do + return nil +} + func (h HyperVStubber) GetDisk(userInputPath string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { return diskpull.GetDisk(userInputPath, dirs, mc.ImagePath, h.VMType(), mc.Name) } @@ -489,12 +496,15 @@ func readAndSplitIgnition(mc *vmconfigs.MachineConfig, vm *hypervctl.VirtualMach } func removeIgnitionFromRegistry(vm *hypervctl.VirtualMachine) error { - pairs, err := vm.GetKeyValuePairs() - if err != nil { - return err - } - for key := range pairs { - if err := vm.RemoveKeyValuePair(key); err != nil { + // because the vm is down at this point, we cannot query hyperv for these key value pairs. + // therefore we blindly iterate from 0-50 and delete the key/value pairs. 
hyperv does not + // raise an error if the key is not present + // + for i := 0; i < 50; i++ { + // this is a well known "key" defined in libhvee and is the vm name + // plus an index starting at 0 + key := fmt.Sprintf("%s%d", vm.ElementName, i) + if err := vm.RemoveKeyValuePairNoWait(key); err != nil { return err } } @@ -502,7 +512,7 @@ func removeIgnitionFromRegistry(vm *hypervctl.VirtualMachine) error { } func logCommandToFile(c *exec.Cmd, filename string) error { - dir, err := machine.GetDataDir(define.HyperVVirt) + dir, err := env.GetDataDir(define.HyperVVirt) if err != nil { return fmt.Errorf("obtain machine dir: %w", err) } @@ -547,3 +557,7 @@ func createNetworkUnit(netPort uint64) (string, error) { netUnit.Add("Install", "WantedBy", "multi-user.target") return netUnit.ToString() } + +func (h HyperVStubber) GetRosetta(mc *vmconfigs.MachineConfig) (bool, error) { + return false, nil +} diff --git a/pkg/machine/hyperv/volumes.go b/pkg/machine/hyperv/volumes.go index 8a802a35b8..ea94147dbd 100644 --- a/pkg/machine/hyperv/volumes.go +++ b/pkg/machine/hyperv/volumes.go @@ -5,6 +5,8 @@ package hyperv import ( "errors" "fmt" + "path" + "strings" "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/hyperv/vsock" @@ -29,7 +31,7 @@ func removeShares(mc *vmconfigs.MachineConfig) error { if err := vsockReg.Remove(); err != nil { if removalErr != nil { - logrus.Errorf("Error removing vsock: %w", removalErr) + logrus.Errorf("Error removing vsock: %v", removalErr) } removalErr = fmt.Errorf("removing vsock %d for mountpoint %s: %w", *mount.VSockNumber, mount.Target, err) } @@ -40,15 +42,26 @@ func removeShares(mc *vmconfigs.MachineConfig) error { func startShares(mc *vmconfigs.MachineConfig) error { for _, mount := range mc.Mounts { - args := []string{"-q", "--", "sudo", "podman"} + var args []string + cleanTarget := path.Clean(mount.Target) + requiresChattr := !strings.HasPrefix(cleanTarget, "/home") && !strings.HasPrefix(cleanTarget, "/mnt") + if requiresChattr { + args = append(args, "sudo", "chattr", "-i", "/", "; ") + } + args = append(args, "sudo", "mkdir", "-p", cleanTarget, "; ") + if requiresChattr { + args = append(args, "sudo", "chattr", "+i", "/", "; ") + } + + args = append(args, "sudo", "podman") if logrus.IsLevelEnabled(logrus.DebugLevel) { args = append(args, "--log-level=debug") } - //just being protective here; in a perfect world, this cannot happen + // just being protective here; in a perfect world, this cannot happen if mount.VSockNumber == nil { return errors.New("cannot start 9p shares with undefined vsock number") } - args = append(args, "machine", "client9p", fmt.Sprintf("%d", mount.VSockNumber), mount.Target) + args = append(args, "machine", "client9p", fmt.Sprintf("%d", *mount.VSockNumber), mount.Target) if err := machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args); err != nil { return err diff --git a/pkg/machine/hyperv/vsock/vsock.go b/pkg/machine/hyperv/vsock/vsock.go index f92d212b08..202faafb38 100644 --- a/pkg/machine/hyperv/vsock/vsock.go +++ b/pkg/machine/hyperv/vsock/vsock.go @@ -5,6 +5,7 @@ package vsock import ( "errors" "fmt" + "io" "net" "strings" @@ -24,8 +25,8 @@ const ( HvsockPurpose = "Purpose" // VsockRegistryPath describes the registry path to where the hvsock registry entries live VsockRegistryPath = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\GuestCommunicationServices` - // LinuxVm is the default guid for a Linux VM on Windows - LinuxVm = 
"FACB-11E6-BD58-64006A7986D3" + // LinuxVM is the default guid for a Linux VM on Windows + LinuxVM = "FACB-11E6-BD58-64006A7986D3" ) // HVSockPurpose describes what the hvsock is needed for @@ -141,7 +142,6 @@ func (hv *HVSockRegistryEntry) validate() error { if len(hv.KeyName) < 1 { return errors.New("required field keypath is empty") } - //decimal_num, err = strconv.ParseInt(hexadecimal_num, 16, 64) return nil } @@ -150,7 +150,7 @@ func (hv *HVSockRegistryEntry) exists() (bool, error) { _ = foo _, err := openVSockRegistryEntry(hv.fqPath()) if err == nil { - return true, err + return true, nil } if errors.Is(err, registry.ErrNotExist) { return false, nil @@ -191,7 +191,7 @@ func findOpenHVSockPort() (uint64, error) { func NewHVSockRegistryEntry(machineName string, purpose HVSockPurpose) (*HVSockRegistryEntry, error) { // a so-called wildcard entry ... everything from FACB -> 6D3 is MS special sauce // for a " linux vm". this first segment is hexi for the hvsock port number - //00000400-FACB-11E6-BD58-64006A7986D3 + // 00000400-FACB-11E6-BD58-64006A7986D3 port, err := findOpenHVSockPort() if err != nil { return nil, err @@ -212,7 +212,7 @@ func portToKeyName(port uint64) string { // this could be flattened but given the complexity, I thought it might // be more difficult to read hexi := strings.ToUpper(fmt.Sprintf("%08x", port)) - return fmt.Sprintf("%s-%s", hexi, LinuxVm) + return fmt.Sprintf("%s-%s", hexi, LinuxVM) } func LoadHVSockRegistryEntry(port uint64) (*HVSockRegistryEntry, error) { @@ -259,21 +259,18 @@ func (hv *HVSockRegistryEntry) Listener() (net.Listener, error) { return listener, nil } -// Listen is used on the windows side to listen for anything to come -// over the hvsock as a signal the vm is booted -func (hv *HVSockRegistryEntry) Listen() error { +// ListenSetupWait creates an hvsock on the windows side and returns +// a wait function that, when called, blocks until it receives a ready +// notification on the vsock +func (hv *HVSockRegistryEntry) ListenSetupWait() (func() error, io.Closer, error) { listener, err := hv.Listener() if err != nil { - return err + return nil, nil, err } - defer func() { - if err := listener.Close(); err != nil { - logrus.Error(err) - } - }() errChan := make(chan error) go sockets.ListenAndWaitOnSocket(errChan, listener) - - return <-errChan + return func() error { + return <-errChan + }, listener, nil } diff --git a/pkg/machine/ignition/ignition.go b/pkg/machine/ignition/ignition.go index f0e5a38c53..58e7b622f1 100644 --- a/pkg/machine/ignition/ignition.go +++ b/pkg/machine/ignition/ignition.go @@ -8,10 +8,13 @@ import ( "io/fs" "net/url" "os" + "path" "path/filepath" + "runtime" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/pkg/systemd/parser" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -63,6 +66,7 @@ type DynamicIgnition struct { Cfg Config Rootful bool NetRecover bool + Rosetta bool } func (ign *DynamicIgnition) Write() error { @@ -170,32 +174,6 @@ func (ign *DynamicIgnition) GenerateIgnitionConfig() error { ignStorage.Links = append(ignStorage.Links, tzLink) } - // Enables automatic login on the console; - // there's no security concerns here, and this makes debugging easier. 
- // xref https://docs.fedoraproject.org/en-US/fedora-coreos/tutorial-autologin/ - var autologinDropin = `[Service] -ExecStart= -ExecStart=-/usr/sbin/agetty --autologin root --noclear %I $TERM -` - - deMoby := parser.NewUnitFile() - deMoby.Add("Unit", "Description", "Remove moby-engine") - deMoby.Add("Unit", "After", "systemd-machine-id-commit.service") - deMoby.Add("Unit", "Before", "zincati.service") - deMoby.Add("Unit", "ConditionPathExists", "!/var/lib/%N.stamp") - - deMoby.Add("Service", "Type", "oneshot") - deMoby.Add("Service", "RemainAfterExit", "yes") - deMoby.Add("Service", "ExecStart", "/usr/bin/rpm-ostree override remove moby-engine") - deMoby.Add("Service", "ExecStart", "/usr/bin/rpm-ostree ex apply-live --allow-replacement") - deMoby.Add("Service", "ExecStartPost", "/bin/touch /var/lib/%N.stamp") - - deMoby.Add("Install", "WantedBy", "default.target") - deMobyFile, err := deMoby.ToString() - if err != nil { - return err - } - // This service gets environment variables that are provided // through qemu fw_cfg and then sets them into systemd/system.conf.d, // profile.d and environment.d files @@ -243,44 +221,13 @@ ExecStart=-/usr/sbin/agetty --autologin root --noclear %I $TERM Name: "podman.socket", }, { - Enabled: BoolToPtr(false), - Name: "docker.service", - Mask: BoolToPtr(true), - }, - { - Enabled: BoolToPtr(false), - Name: "docker.socket", - Mask: BoolToPtr(true), - }, - { - Enabled: BoolToPtr(true), - Name: "remove-moby.service", - Contents: &deMobyFile, - }, - { + // TODO Need to understand if this could play a role in machine + // updates given a certain configuration // Disable auto-updating of fcos images // https://github.com/containers/podman/issues/20122 Enabled: BoolToPtr(false), Name: "zincati.service", }, - { - Name: "serial-getty@.service", - Dropins: []Dropin{ - { - Name: "10-autologin.conf", - Contents: &autologinDropin, - }, - }, - }, - { - Name: "getty@.service", - Dropins: []Dropin{ - { - Name: "10-autologin.conf", - Contents: &autologinDropin, - }, - }, - }, }, } @@ -294,18 +241,17 @@ ExecStart=-/usr/sbin/agetty --autologin root --noclear %I $TERM ignSystemd.Units = append(ignSystemd.Units, qemuUnit) } - if ign.NetRecover { - contents, err := GetNetRecoveryUnitFile().ToString() - if err != nil { - return err - } - - recoveryUnit := Unit{ - Enabled: BoolToPtr(true), - Name: "net-health-recovery.service", - Contents: &contents, + // Only AppleHv with Apple Silicon can use Rosetta + if ign.VMType == define.AppleHvVirt && runtime.GOARCH == "arm64" { + rosettaUnit := Systemd{ + Units: []Unit{ + { + Enabled: BoolToPtr(true), + Name: "rosetta-activation.service", + }, + }, } - ignSystemd.Units = append(ignSystemd.Units, recoveryUnit) + ignSystemd.Units = append(ignSystemd.Units, rosettaUnit.Units...) } // Only after all checks are done @@ -344,43 +290,10 @@ func getDirs(usrName string) []Directory { dirs[i] = newDir } - // Issue #11489: make sure that we can inject a custom registries.conf - // file on the system level to force a single search registry. - // The remote client does not yet support prompting for short-name - // resolution, so we enforce a single search registry (i.e., docker.io) - // as a workaround. 
- dirs = append(dirs, Directory{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/containers/registries.conf.d", - User: GetNodeUsr("root"), - }, - DirectoryEmbedded1: DirectoryEmbedded1{Mode: IntToPtr(0755)}, - }) - - // The directory is used by envset-fwcfg.service - // for propagating environment variables that got - // from a host - dirs = append(dirs, Directory{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/systemd/system.conf.d", - User: GetNodeUsr("root"), - }, - DirectoryEmbedded1: DirectoryEmbedded1{Mode: IntToPtr(0755)}, - }, Directory{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/environment.d", - User: GetNodeUsr("root"), - }, - DirectoryEmbedded1: DirectoryEmbedded1{Mode: IntToPtr(0755)}, - }) - return dirs } -func getFiles(usrName string, uid int, rootful bool, vmtype define.VMType, netRecover bool) []File { +func getFiles(usrName string, uid int, rootful bool, vmtype define.VMType, _ bool) []File { files := make([]File, 0) lingerExample := parser.NewUnitFile() @@ -397,15 +310,12 @@ func getFiles(usrName string, uid int, rootful bool, vmtype define.VMType, netRe netns="bridge" pids_limit=0 ` + // TODO I think this can be removed but leaving breadcrumb until certain. // Set deprecated machine_enabled until podman package on fcos is // current enough to no longer require it - rootContainers := `[engine] -machine_enabled=true -` - - delegateConf := `[Service] -Delegate=memory pids cpu io -` + // rootContainers := `[engine] + // machine_enabled=true + // ` // Prevent subUID from clashing with actual UID subUID := 100000 subUIDs := 1000000 @@ -446,6 +356,7 @@ Delegate=memory pids cpu io Mode: IntToPtr(0744), }, }) + // Set up /etc/subuid and /etc/subgid for _, sub := range []string{"/etc/subuid", "/etc/subgid"} { files = append(files, File{ @@ -465,50 +376,8 @@ Delegate=memory pids cpu io }) } - // Set delegate.conf so cpu,io subsystem is delegated to non-root users as well for cgroupv2 - // by default - files = append(files, File{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/systemd/system/user@.service.d/delegate.conf", - User: GetNodeUsr("root"), - }, - FileEmbedded1: FileEmbedded1{ - Append: nil, - Contents: Resource{ - Source: EncodeDataURLPtr(delegateConf), - }, - Mode: IntToPtr(0644), - }, - }) - - // Add a file into linger - files = append(files, File{ - Node: Node{ - Group: GetNodeGrp(usrName), - Path: "/var/lib/systemd/linger/core", - User: GetNodeUsr(usrName), - }, - FileEmbedded1: FileEmbedded1{Mode: IntToPtr(0644)}, - }) - - // Set deprecated machine_enabled to true to indicate we're in a VM - files = append(files, File{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/containers/containers.conf", - User: GetNodeUsr("root"), - }, - FileEmbedded1: FileEmbedded1{ - Append: nil, - Contents: Resource{ - Source: EncodeDataURLPtr(rootContainers), - }, - Mode: IntToPtr(0644), - }, - }) - - // Set machine marker file to indicate podman is in a qemu based machine + // Set machine marker file to indicate podman what vmtype we are + // operating under files = append(files, File{ Node: Node{ Group: GetNodeGrp("root"), @@ -524,42 +393,6 @@ Delegate=memory pids cpu io }, }) - // Increase the number of inotify instances. 
- files = append(files, File{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/sysctl.d/10-inotify-instances.conf", - User: GetNodeUsr("root"), - }, - FileEmbedded1: FileEmbedded1{ - Append: nil, - Contents: Resource{ - Source: EncodeDataURLPtr("fs.inotify.max_user_instances=524288\n"), - }, - Mode: IntToPtr(0644), - }, - }) - - // Issue #11489: make sure that we can inject a custom registries.conf - // file on the system level to force a single search registry. - // The remote client does not yet support prompting for short-name - // resolution, so we enforce a single search registry (i.e., docker.io) - // as a workaround. - files = append(files, File{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/containers/registries.conf.d/999-podman-machine.conf", - User: GetNodeUsr("root"), - }, - FileEmbedded1: FileEmbedded1{ - Append: nil, - Contents: Resource{ - Source: EncodeDataURLPtr("unqualified-search-registries=[\"docker.io\"]\n"), - }, - Mode: IntToPtr(0644), - }, - }) - files = append(files, File{ Node: Node{ Path: PodmanDockerTmpConfPath, @@ -574,24 +407,6 @@ Delegate=memory pids cpu io }, }) - setDockerHost := `export DOCKER_HOST="unix://$(podman info -f "{{.Host.RemoteSocket.Path}}")" -` - - files = append(files, File{ - Node: Node{ - Group: GetNodeGrp("root"), - Path: "/etc/profile.d/docker-host.sh", - User: GetNodeUsr("root"), - }, - FileEmbedded1: FileEmbedded1{ - Append: nil, - Contents: Resource{ - Source: EncodeDataURLPtr(setDockerHost), - }, - Mode: IntToPtr(0644), - }, - }) - // get certs for current user userHome, err := os.UserHomeDir() if err != nil { @@ -605,67 +420,28 @@ Delegate=memory pids cpu io certFiles = getCerts(filepath.Join(userHome, ".config/docker/certs.d"), true) files = append(files, certFiles...) - if sslCertFile, ok := os.LookupEnv("SSL_CERT_FILE"); ok { - if _, err := os.Stat(sslCertFile); err == nil { - certFiles = getCerts(sslCertFile, false) + sslCertFileName, ok := os.LookupEnv(sslCertFile) + if ok { + if err := fileutils.Exists(sslCertFileName); err == nil { + certFiles = getCerts(sslCertFileName, false) files = append(files, certFiles...) } else { - logrus.Warnf("Invalid path in SSL_CERT_FILE: %q", err) + logrus.Warnf("Invalid path in %s: %q", sslCertFile, err) } } - if sslCertDir, ok := os.LookupEnv("SSL_CERT_DIR"); ok { - if _, err := os.Stat(sslCertDir); err == nil { - certFiles = getCerts(sslCertDir, true) + sslCertDirName, ok := os.LookupEnv(sslCertDir) + if ok { + if err := fileutils.Exists(sslCertDirName); err == nil { + certFiles = getCerts(sslCertDirName, true) files = append(files, certFiles...) } else { - logrus.Warnf("Invalid path in SSL_CERT_DIR: %q", err) + logrus.Warnf("Invalid path in %s: %q", sslCertDir, err) } } - - files = append(files, File{ - Node: Node{ - User: GetNodeUsr("root"), - Group: GetNodeGrp("root"), - Path: "/etc/chrony.conf", - }, - FileEmbedded1: FileEmbedded1{ - Append: []Resource{{ - Source: EncodeDataURLPtr("\nconfdir /etc/chrony.d\n"), - }}, - }, - }) - - // Issue #11541: allow Chrony to update the system time when it has drifted - // far from NTP time. 
- files = append(files, File{ - Node: Node{ - User: GetNodeUsr("root"), - Group: GetNodeGrp("root"), - Path: "/etc/chrony.d/50-podman-makestep.conf", - }, - FileEmbedded1: FileEmbedded1{ - Contents: Resource{ - Source: EncodeDataURLPtr("makestep 1 -1\n"), - }, - }, - }) - - // Only necessary for qemu on mac - if netRecover { - files = append(files, File{ - Node: Node{ - User: GetNodeUsr("root"), - Group: GetNodeGrp("root"), - Path: "/usr/local/bin/net-health-recovery.sh", - }, - FileEmbedded1: FileEmbedded1{ - Mode: IntToPtr(0755), - Contents: Resource{ - Source: EncodeDataURLPtr(GetNetRecoveryFile()), - }, - }, - }) + if sslCertFileName != "" || sslCertDirName != "" { + // If we copied certs via env then also make sure to set the env in the VM. + files = append(files, getSSLEnvironmentFiles(sslCertFileName, sslCertDirName)...) + } return files @@ -709,16 +485,18 @@ func getCerts(certsDir string, isDir bool) []File { return files } -func prepareCertFile(path string, name string) (File, error) { - b, err := os.ReadFile(path) +func prepareCertFile(fpath string, name string) (File, error) { + b, err := os.ReadFile(fpath) if err != nil { logrus.Warnf("Unable to read cert file %v", err) return File{}, err } - targetPath := filepath.Join(define.UserCertsTargetPath, name) + // Note path is required here as we always create a path for the linux VM + // even when the client runs on windows so we cannot use filepath. + targetPath := path.Join(define.UserCertsTargetPath, name) - logrus.Debugf("Copying cert file from '%s' to '%s'.", path, targetPath) + logrus.Debugf("Copying cert file from '%s' to '%s'.", fpath, targetPath) file := File{ Node: Node{ @@ -737,6 +515,57 @@ func prepareCertFile(path string, name string) (File, error) { return file, nil } +const ( + systemdSSLConf = "/etc/systemd/system.conf.d/podman-machine-ssl.conf" + envdSSLConf = "/etc/environment.d/podman-machine-ssl.conf" + profileSSLConf = "/etc/profile.d/podman-machine-ssl.sh" + sslCertFile = "SSL_CERT_FILE" + sslCertDir = "SSL_CERT_DIR" +) + +func getSSLEnvironmentFiles(sslFileName, sslDirName string) []File { + systemdFileContent := "[Manager]\n" + envdFileContent := "" + profileFileContent := "" + if sslFileName != "" { + // certs are written to UserCertsTargetPath see prepareCertFile() + // Note the mix of path/filepath is intentional and required, we want to get the name of + // a path on the client (i.e. windows) but then join to linux path that will be used inside the VM.
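// For illustration only (hypothetical values, not part of this patch): a Windows client that sets
// SSL_CERT_FILE=C:\certs\my-ca.pem yields filepath.Base() == "my-ca.pem", and the generated line
// becomes SSL_CERT_FILE="<define.UserCertsTargetPath>/my-ca.pem" inside the VM, since path.Join
// keeps the "/" separator regardless of the client OS.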
+ env := fmt.Sprintf("%s=%q\n", sslCertFile, path.Join(define.UserCertsTargetPath, filepath.Base(sslFileName))) + systemdFileContent += "DefaultEnvironment=" + env + envdFileContent += env + profileFileContent += "export " + env + } + if sslDirName != "" { + // certs are written to UserCertsTargetPath see prepareCertFile() + env := fmt.Sprintf("%s=%q\n", sslCertDir, define.UserCertsTargetPath) + systemdFileContent += "DefaultEnvironment=" + env + envdFileContent += env + profileFileContent += "export " + env + } + return []File{ + getSSLFile(systemdSSLConf, systemdFileContent), + getSSLFile(envdSSLConf, envdFileContent), + getSSLFile(profileSSLConf, profileFileContent), + } +} + +func getSSLFile(path, content string) File { + return File{ + Node: Node{ + Group: GetNodeGrp("root"), + Path: path, + User: GetNodeUsr("root"), + }, + FileEmbedded1: FileEmbedded1{ + Contents: Resource{ + Source: EncodeDataURLPtr(content), + }, + Mode: IntToPtr(0644), + }, + } +} + func getLinks(usrName string) []Link { return []Link{{ Node: Node{ @@ -871,7 +700,7 @@ func GetNetRecoveryUnitFile() *parser.UnitFile { func DefaultReadyUnitFile() parser.UnitFile { u := parser.NewUnitFile() - u.Add("Unit", "After", "remove-moby.service sshd.socket sshd.service") + u.Add("Unit", "After", "sshd.socket sshd.service") u.Add("Unit", "OnFailure", "emergency.target") u.Add("Unit", "OnFailureJobMode", "isolate") u.Add("Service", "Type", "oneshot") diff --git a/pkg/machine/ignition/ready.go b/pkg/machine/ignition/ready.go index 42a50dcfda..8ee5156a07 100644 --- a/pkg/machine/ignition/ready.go +++ b/pkg/machine/ignition/ready.go @@ -1,3 +1,5 @@ +//go:build amd64 || arm64 + package ignition import ( @@ -21,14 +23,16 @@ func CreateReadyUnitFile(provider define.VMType, opts *ReadyUnitOpts) (string, e readyUnit.Add("Unit", "Requires", "dev-virtio\\x2dports-vport1p1.device") readyUnit.Add("Unit", "After", "systemd-user-sessions.service") readyUnit.Add("Service", "ExecStart", "/bin/sh -c '/usr/bin/echo Ready >/dev/vport1p1'") - case define.AppleHvVirt: + case define.AppleHvVirt, define.LibKrun: readyUnit.Add("Unit", "Requires", "dev-virtio\\x2dports-vsock.device") readyUnit.Add("Service", "ExecStart", "/bin/sh -c '/usr/bin/echo Ready | socat - VSOCK-CONNECT:2:1025'") case define.HyperVVirt: if opts == nil || opts.Port == 0 { return "", errors.New("no port provided for hyperv ready unit") } + readyUnit.Add("Unit", "Requires", "sys-devices-virtual-net-vsock0.device") readyUnit.Add("Unit", "After", "systemd-user-sessions.service") + readyUnit.Add("Unit", "After", "vsock-network.service") readyUnit.Add("Service", "ExecStart", fmt.Sprintf("/bin/sh -c '/usr/bin/echo Ready | socat - VSOCK-CONNECT:2:%d'", opts.Port)) case define.WSLVirt: // WSL does not use ignition return "", nil diff --git a/pkg/machine/keys.go b/pkg/machine/keys.go index e151aa1174..e4c37cb57c 100644 --- a/pkg/machine/keys.go +++ b/pkg/machine/keys.go @@ -11,6 +11,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -20,7 +21,7 @@ var sshCommand = []string{"ssh-keygen", "-N", "", "-t", "ed25519", "-f"} // the a VM. 
func CreateSSHKeys(writeLocation string) (string, error) { // If the SSH key already exists, hard fail - if _, err := os.Stat(writeLocation); err == nil { + if err := fileutils.Exists(writeLocation); err == nil { return "", fmt.Errorf("SSH key already exists: %s", writeLocation) } if err := os.MkdirAll(filepath.Dir(writeLocation), 0700); err != nil { @@ -39,7 +40,7 @@ func CreateSSHKeys(writeLocation string) (string, error) { // GetSSHKeys checks to see if there is a ssh key at the provided location. // If not, we create the priv and pub keys. The ssh key is then returned. func GetSSHKeys(identityPath string) (string, error) { - if _, err := os.Stat(identityPath); err == nil { + if err := fileutils.Exists(identityPath); err == nil { b, err := os.ReadFile(identityPath + ".pub") if err != nil { return "", err @@ -51,7 +52,7 @@ func GetSSHKeys(identityPath string) (string, error) { } func CreateSSHKeysPrefix(identityPath string, passThru bool, skipExisting bool, prefix ...string) (string, error) { - _, e := os.Stat(identityPath) + e := fileutils.Exists(identityPath) if !skipExisting || errors.Is(e, os.ErrNotExist) { if err := generatekeysPrefix(identityPath, passThru, prefix...); err != nil { return "", err diff --git a/pkg/machine/libkrun/stubber.go b/pkg/machine/libkrun/stubber.go new file mode 100644 index 0000000000..1749b113c6 --- /dev/null +++ b/pkg/machine/libkrun/stubber.go @@ -0,0 +1,145 @@ +//go:build darwin + +package libkrun + +import ( + "fmt" + "strconv" + + gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" + "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/apple" + "github.com/containers/podman/v5/pkg/machine/apple/vfkit" + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/ignition" + "github.com/containers/podman/v5/pkg/machine/shim/diskpull" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" + "github.com/containers/podman/v5/utils" + vfConfig "github.com/crc-org/vfkit/pkg/config" +) + +const ( + krunkitBinary = "krunkit" + localhostURI = "http://localhost" +) + +type LibKrunStubber struct { + vmconfigs.AppleHVConfig +} + +func (l LibKrunStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConfig, builder *ignition.IgnitionBuilder) error { + mc.LibKrunHypervisor = new(vmconfigs.LibKrunConfig) + mc.LibKrunHypervisor.KRun = vfkit.Helper{} + + bl := vfConfig.NewEFIBootloader(fmt.Sprintf("%s/efi-bl-%s", opts.Dirs.DataDir.GetPath(), opts.Name), true) + mc.LibKrunHypervisor.KRun.VirtualMachine = vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), uint64(mc.Resources.Memory), bl) + + randPort, err := utils.GetRandomPort() + if err != nil { + return err + } + mc.LibKrunHypervisor.KRun.Endpoint = localhostURI + ":" + strconv.Itoa(randPort) + + virtiofsMounts := make([]machine.VirtIoFs, 0, len(mc.Mounts)) + for _, mnt := range mc.Mounts { + virtiofsMounts = append(virtiofsMounts, machine.MountToVirtIOFs(mnt)) + } + + // Populate the ignition file with virtiofs stuff + virtIOIgnitionMounts, err := apple.GenerateSystemDFilesForVirtiofsMounts(virtiofsMounts) + if err != nil { + return err + } + builder.WithUnit(virtIOIgnitionMounts...) 
+ + return apple.ResizeDisk(mc, mc.Resources.DiskSize) +} + +func (l LibKrunStubber) GetDisk(userInputPath string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { + return diskpull.GetDisk(userInputPath, dirs, mc.ImagePath, l.VMType(), mc.Name) +} + +func (l LibKrunStubber) PrepareIgnition(mc *vmconfigs.MachineConfig, ignBuilder *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error) { + return nil, nil +} + +func (l LibKrunStubber) Exists(name string) (bool, error) { + // not applicable for libkrun (same as applehv) + return false, nil +} + +func (l LibKrunStubber) MountType() vmconfigs.VolumeMountType { + return vmconfigs.VirtIOFS +} + +func (l LibKrunStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) error { + return nil +} + +func (l LibKrunStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, error) { + return []string{}, func() error { return nil }, nil +} + +func (l LibKrunStubber) RemoveAndCleanMachines(dirs *define.MachineDirs) error { + return nil +} + +func (l LibKrunStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.SetOptions) error { + state, err := l.State(mc, false) + if err != nil { + return err + } + return apple.SetProviderAttrs(mc, opts, state) +} + +func (l LibKrunStubber) StartNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error { + return apple.StartGenericNetworking(mc, cmd) +} + +func (l LibKrunStubber) PostStartNetworking(mc *vmconfigs.MachineConfig, noInfo bool) error { + return nil +} + +func (l LibKrunStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) { + bl := mc.LibKrunHypervisor.KRun.VirtualMachine.Bootloader + if bl == nil { + return nil, nil, fmt.Errorf("unable to determine boot loader for this machine") + } + return apple.StartGenericAppleVM(mc, krunkitBinary, bl, mc.LibKrunHypervisor.KRun.Endpoint) +} + +func (l LibKrunStubber) State(mc *vmconfigs.MachineConfig, bypass bool) (define.Status, error) { + return mc.LibKrunHypervisor.KRun.State() +} + +func (l LibKrunStubber) StopVM(mc *vmconfigs.MachineConfig, hardStop bool) error { + return mc.LibKrunHypervisor.KRun.Stop(hardStop, true) +} + +func (l LibKrunStubber) StopHostNetworking(mc *vmconfigs.MachineConfig, vmType define.VMType) error { + return nil +} + +func (l LibKrunStubber) VMType() define.VMType { + return define.LibKrun +} + +func (l LibKrunStubber) UserModeNetworkEnabled(mc *vmconfigs.MachineConfig) bool { + return true +} + +func (l LibKrunStubber) UseProviderNetworkSetup() bool { + return false +} + +func (l LibKrunStubber) RequireExclusiveActive() bool { + return true +} + +func (l LibKrunStubber) UpdateSSHPort(mc *vmconfigs.MachineConfig, port int) error { + return nil +} + +func (l LibKrunStubber) GetRosetta(mc *vmconfigs.MachineConfig) (bool, error) { + return false, nil +} diff --git a/pkg/machine/lock/lock.go b/pkg/machine/lock/lock.go index ede7075c9f..dbe941eb95 100644 --- a/pkg/machine/lock/lock.go +++ b/pkg/machine/lock/lock.go @@ -4,6 +4,7 @@ import ( "fmt" "path/filepath" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/storage/pkg/lockfile" ) @@ -15,3 +16,21 @@ func GetMachineLock(name string, machineConfigDir string) (*lockfile.LockFile, e } return lock, nil } + +const machineStartLockName = "machine-start.lock" + +// GetMachineStartLock is a lock used only to prevent starting different machines at the same time. +// This is required as most providers support at most one running VM, and to check this race-free we +// cannot allow starting 
two machines. +func GetMachineStartLock() (*lockfile.LockFile, error) { + lockDir, err := env.GetGlobalDataDir() + if err != nil { + return nil, err + } + + lock, err := lockfile.GetLockFile(filepath.Join(lockDir, machineStartLockName)) + if err != nil { + return nil, err + } + return lock, nil +} diff --git a/pkg/machine/machine_common.go b/pkg/machine/machine_common.go index a8eb8be396..1afc3d15b3 100644 --- a/pkg/machine/machine_common.go +++ b/pkg/machine/machine_common.go @@ -81,7 +81,7 @@ address can't be used by podman. ` fmtString = `You can %sconnect Docker API clients by setting DOCKER_HOST using the following command in your terminal session: - %s' + %s ` prefix := "" diff --git a/pkg/machine/machine_windows.go b/pkg/machine/machine_windows.go index 3e19f4d98e..7d39795bc5 100644 --- a/pkg/machine/machine_windows.go +++ b/pkg/machine/machine_windows.go @@ -17,14 +17,16 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) const ( NamedPipePrefix = "npipe:////./pipe/" GlobalNamedPipe = "docker_engine" - winSShProxy = "win-sshproxy.exe" - winSshProxyTid = "win-sshproxy.tid" + winSSHProxy = "win-sshproxy.exe" + winSSHProxyTid = "win-sshproxy.tid" rootfulSock = "/run/podman/podman.sock" rootlessSock = "/run/user/1000/podman/podman.sock" @@ -33,7 +35,8 @@ const ( GlobalNameWait = 250 * time.Millisecond ) -const WM_QUIT = 0x12 //nolint +//nolint:stylecheck +const WM_QUIT = 0x12 type WinProxyOpts struct { Name string @@ -63,7 +66,7 @@ func PipeNameAvailable(pipeName string, maxWait time.Duration) bool { const interval = 250 * time.Millisecond var wait time.Duration for { - _, err := os.Stat(`\\.\pipe\` + pipeName) + err := fileutils.Exists(`\\.\pipe\` + pipeName) if errors.Is(err, fs.ErrNotExist) { return true } @@ -78,7 +81,7 @@ func PipeNameAvailable(pipeName string, maxWait time.Duration) bool { func WaitPipeExists(pipeName string, retries int, checkFailure func() error) error { var err error for i := 0; i < retries; i++ { - _, err = os.Stat(`\\.\pipe\` + pipeName) + err = fileutils.Exists(`\\.\pipe\` + pipeName) if err == nil { break } @@ -92,7 +95,7 @@ func WaitPipeExists(pipeName string, retries int, checkFailure func() error) err } func DialNamedPipe(ctx context.Context, path string) (net.Conn, error) { - path = strings.Replace(path, "/", "\\", -1) + path = strings.ReplaceAll(path, "/", "\\") return winio.DialPipeContext(ctx, path) } @@ -121,7 +124,7 @@ func LaunchWinProxy(opts WinProxyOpts, noInfo bool) { } func launchWinProxy(opts WinProxyOpts) (bool, string, error) { - machinePipe := ToDist(opts.Name) + machinePipe := env.WithPodmanPrefix(opts.Name) if !PipeNameAvailable(machinePipe, MachineNameWait) { return false, "", fmt.Errorf("could not start api proxy since expected pipe is not available: %s", machinePipe) } @@ -131,7 +134,7 @@ func launchWinProxy(opts WinProxyOpts) (bool, string, error) { globalName = true } - command, err := FindExecutablePeer(winSShProxy) + command, err := FindExecutablePeer(winSSHProxy) if err != nil { return globalName, "", err } @@ -181,6 +184,7 @@ func StopWinProxy(name string, vmtype define.VMType) error { proc, err := os.FindProcess(int(pid)) if err != nil { + //nolint:nilerr return nil } sendQuit(tid) @@ -196,7 +200,7 @@ func readWinProxyTid(name string, vmtype define.VMType) (uint32, uint32, string, return 0, 0, "", err } - tidFile := filepath.Join(stateDir, 
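// A minimal usage sketch (not part of this patch; startSerialized and its caller are
// hypothetical) of the new machine-start lock above. It only assumes the Lock/Unlock
// API of containers/storage's lockfile.LockFile, which GetMachineStartLock returns.
package main

import (
	"fmt"

	"github.com/containers/podman/v5/pkg/machine/lock"
)

// startSerialized takes the global machine-start.lock, runs one start attempt,
// and releases the lock so the next "podman machine start" may proceed.
func startSerialized(startFn func() error) error {
	startLock, err := lock.GetMachineStartLock()
	if err != nil {
		return err
	}
	startLock.Lock() // blocks while another start holds the lock
	defer startLock.Unlock()
	return startFn()
}

func main() {
	_ = startSerialized(func() error {
		fmt.Println("starting the VM while holding machine-start.lock")
		return nil
	})
}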
winSshProxyTid) + tidFile := filepath.Join(stateDir, winSSHProxyTid) contents, err := os.ReadFile(tidFile) if err != nil { return 0, 0, "", err @@ -210,13 +214,13 @@ func readWinProxyTid(name string, vmtype define.VMType) (uint32, uint32, string, func waitTimeout(proc *os.Process, timeout time.Duration) bool { done := make(chan bool) go func() { - proc.Wait() + _, _ = proc.Wait() done <- true }() ret := false select { case <-time.After(timeout): - proc.Kill() + _ = proc.Kill() <-done case <-done: ret = true @@ -229,7 +233,8 @@ func waitTimeout(proc *os.Process, timeout time.Duration) bool { func sendQuit(tid uint32) { user32 := syscall.NewLazyDLL("user32.dll") postMessage := user32.NewProc("PostThreadMessageW") - postMessage.Call(uintptr(tid), WM_QUIT, 0, 0) + //nolint:dogsled + _, _, _ = postMessage.Call(uintptr(tid), WM_QUIT, 0, 0) } func FindExecutablePeer(name string) (string, error) { @@ -247,7 +252,7 @@ func FindExecutablePeer(name string) (string, error) { } func GetWinProxyStateDir(name string, vmtype define.VMType) (string, error) { - dir, err := GetDataDir(vmtype) + dir, err := env.GetDataDir(vmtype) if err != nil { return "", err } @@ -259,13 +264,6 @@ func GetWinProxyStateDir(name string, vmtype define.VMType) (string, error) { return stateDir, nil } -func ToDist(name string) string { - if !strings.HasPrefix(name, "podman") { - name = "podman-" + name - } - return name -} - func GetEnvSetString(env string, val string) string { return fmt.Sprintf("$Env:%s=\"%s\"", env, val) } diff --git a/pkg/machine/ocipull/ociartifact.go b/pkg/machine/ocipull/ociartifact.go index 9d59fb843d..587c5cfdd9 100644 --- a/pkg/machine/ocipull/ociartifact.go +++ b/pkg/machine/ocipull/ociartifact.go @@ -24,15 +24,15 @@ import ( ) const ( - // TODO This is temporary until we decide on a proper image name artifactRegistry = "quay.io" - artifactRepo = "baude" - artifactImageName = "podman-machine-images-art" + artifactRepo = "podman" + artifactImageName = "machine-os" artifactOriginalName = "org.opencontainers.image.title" machineOS = "linux" ) type OCIArtifactDisk struct { + cache bool cachedCompressedDiskPath *define.VMFile name string ctx context.Context @@ -71,7 +71,7 @@ type DiskArtifactOpts struct { */ -func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, vmName string, vmType define.VMType, finalPath *define.VMFile) (*OCIArtifactDisk, error) { +func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, endpoint string, vmName string, vmType define.VMType, finalPath *define.VMFile) (*OCIArtifactDisk, error) { var ( arch string ) @@ -88,15 +88,23 @@ func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, vmName st diskOpts := DiskArtifactOpts{ arch: arch, - diskType: vmType.String(), + diskType: vmType.DiskType(), os: machineOS, } + + cache := false + if endpoint == "" { + endpoint = fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, artifactImageName, artifactVersion.majorMinor()) + cache = true + } + ociDisk := OCIArtifactDisk{ ctx: ctx, + cache: cache, dirs: dirs, diskArtifactOpts: &diskOpts, finalPath: finalPath.GetPath(), - imageEndpoint: fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, artifactImageName, artifactVersion.majorMinor()), + imageEndpoint: endpoint, machineVersion: artifactVersion, name: vmName, pullOptions: &PullOptions{}, @@ -105,25 +113,51 @@ func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, vmName st return &ociDisk, nil } +func (o *OCIArtifactDisk) OriginalFileName() (string, 
string) { + return o.cachedCompressedDiskPath.GetPath(), o.diskArtifactFileName +} + func (o *OCIArtifactDisk) Get() error { - destRef, artifactDigest, err := o.getDestArtifact() + cleanCache, err := o.get() if err != nil { return err } + if cleanCache != nil { + defer cleanCache() + } + return o.decompress() +} + +func (o *OCIArtifactDisk) GetNoCompress() (func(), error) { + return o.get() +} + +func (o *OCIArtifactDisk) get() (func(), error) { + cleanCache := func() {} + + destRef, artifactDigest, err := o.getDestArtifact() + if err != nil { + return nil, err + } // Note: the artifactDigest here is the hash of the most recent disk image available cachedImagePath, err := o.dirs.ImageCacheDir.AppendToNewVMFile(fmt.Sprintf("%s.%s", artifactDigest.Encoded(), o.vmType.ImageFormat().KindWithCompression()), nil) if err != nil { - return err + return nil, err } + // check if we have the latest and greatest disk image if _, err = os.Stat(cachedImagePath.GetPath()); err != nil { if !errors.Is(err, os.ErrNotExist) { - return fmt.Errorf("unable to access cached image path %q: %q", cachedImagePath.GetPath(), err) + return nil, fmt.Errorf("unable to access cached image path %q: %q", cachedImagePath.GetPath(), err) } + + // On cache misses, we clean out the cache + cleanCache = o.cleanCache(cachedImagePath.GetPath()) + // pull the image down to our local filesystem if err := o.pull(destRef, artifactDigest); err != nil { - return err + return nil, fmt.Errorf("failed to pull %s: %w", destRef.DockerReference(), err) } // grab the artifact disk out of the cache and lay // it into our local cache in the format of @@ -133,13 +167,46 @@ func (o *OCIArtifactDisk) Get() error { // // i.e. 91d1e51...d28974.qcow2.xz if err := o.unpack(artifactDigest); err != nil { - return err + return nil, err } } else { logrus.Debugf("cached image exists and is latest: %s", cachedImagePath.GetPath()) o.cachedCompressedDiskPath = cachedImagePath } - return o.decompress() + return cleanCache, nil +} + +func (o *OCIArtifactDisk) cleanCache(cachedImagePath string) func() { + // cache miss while using an image that we cache, ie the default image + // clean out all old files fron the cache dir + if o.cache { + files, err := os.ReadDir(o.dirs.ImageCacheDir.GetPath()) + if err != nil { + logrus.Warn("failed to clean machine image cache: ", err) + return nil + } + + return func() { + for _, file := range files { + path := filepath.Join(o.dirs.ImageCacheDir.GetPath(), file.Name()) + logrus.Debugf("cleaning cached file: %s", path) + err := utils.GuardedRemoveAll(path) + if err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warn("failed to clean machine image cache: ", err) + } + } + } + } else { + // using an image that we don't cache, ie not the default image + // delete image after use and don't cache + return func() { + logrus.Debugf("cleaning cache: %s", o.dirs.ImageCacheDir.GetPath()) + err := os.Remove(cachedImagePath) + if err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warn("failed to clean pulled machine image: ", err) + } + } + } } func (o *OCIArtifactDisk) getDestArtifact() (types.ImageReference, digest.Digest, error) { @@ -147,6 +214,7 @@ func (o *OCIArtifactDisk) getDestArtifact() (types.ImageReference, digest.Digest if err != nil { return nil, "", err } + fmt.Printf("Looking up Podman Machine image at %s to create VM\n", imgRef.DockerReference()) sysCtx := &types.SystemContext{ DockerInsecureSkipTLSVerify: types.NewOptionalBool(!o.pullOptions.TLSVerify), } diff --git a/pkg/machine/ocipull/policy.go 
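// A small illustration (not from this patch; the helper name and the "5.1" tag are
// assumed examples) of how the default machine-os reference is composed from the
// constants above when the caller passes an empty endpoint. That default case is
// also the only one where the image cache is kept and pruned on a cache miss.
package main

import "fmt"

func defaultMachineImageRef(majorMinor string) string {
	// mirrors: docker://<artifactRegistry>/<artifactRepo>/<artifactImageName>:<version>
	return fmt.Sprintf("docker://%s/%s/%s:%s", "quay.io", "podman", "machine-os", majorMinor)
}

func main() {
	fmt.Println(defaultMachineImageRef("5.1")) // docker://quay.io/podman/machine-os:5.1
}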
b/pkg/machine/ocipull/policy.go new file mode 100644 index 0000000000..d2f929e70b --- /dev/null +++ b/pkg/machine/ocipull/policy.go @@ -0,0 +1,33 @@ +package ocipull + +import ( + "os" + "path/filepath" + + "github.com/sirupsen/logrus" +) + +// DefaultPolicyJSONPath should be overwritten at build time with the real path to the directory where +// the shipped policy.json file is located. This can either be absolute path or a relative path. If it +// is relative it will be resolved relative to the podman binary and NOT the CWD. +// +// use "-X github.com/containers/podman/v5/pkg/machine/ocipull.DefaultPolicyJSONPath=/somepath" in go ldflags to overwrite this +var DefaultPolicyJSONPath = "" + +const policyfile = "policy.json" + +// policyPaths returns a slice of possible directories where a policy.json might live +func policyPaths() []string { + paths := localPolicyOverwrites() + if DefaultPolicyJSONPath != "" { + if filepath.IsAbs(DefaultPolicyJSONPath) { + return append(paths, filepath.Join(DefaultPolicyJSONPath, policyfile)) + } + p, err := os.Executable() + if err != nil { + logrus.Warnf("could not resolve relative path to binary: %q", err) + } + paths = append(paths, filepath.Join(filepath.Dir(p), DefaultPolicyJSONPath, policyfile)) + } + return paths +} diff --git a/pkg/machine/ocipull/policy.json b/pkg/machine/ocipull/policy.json new file mode 100644 index 0000000000..bb26e57ff2 --- /dev/null +++ b/pkg/machine/ocipull/policy.json @@ -0,0 +1,7 @@ +{ + "default": [ + { + "type": "insecureAcceptAnything" + } + ] +} diff --git a/pkg/machine/ocipull/policy_unix.go b/pkg/machine/ocipull/policy_unix.go new file mode 100644 index 0000000000..2fd0443583 --- /dev/null +++ b/pkg/machine/ocipull/policy_unix.go @@ -0,0 +1,19 @@ +//go:build !windows + +package ocipull + +import ( + "path/filepath" + + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/homedir" +) + +func localPolicyOverwrites() []string { + var dirs []string + if p, err := homedir.GetConfigHome(); err == nil { + dirs = append(dirs, filepath.Join(p, "containers", policyfile)) + } + dirs = append(dirs, config.DefaultSignaturePolicyPath) + return dirs +} diff --git a/pkg/machine/ocipull/policy_windows.go b/pkg/machine/ocipull/policy_windows.go new file mode 100644 index 0000000000..3a1c31932c --- /dev/null +++ b/pkg/machine/ocipull/policy_windows.go @@ -0,0 +1,10 @@ +package ocipull + +import ( + "os" + "path/filepath" +) + +func localPolicyOverwrites() []string { + return []string{filepath.Join(os.Getenv("APPDATA"), "containers", policyfile)} +} diff --git a/pkg/machine/ocipull/pull.go b/pkg/machine/ocipull/pull.go index e484964da7..0822578e8a 100644 --- a/pkg/machine/ocipull/pull.go +++ b/pkg/machine/ocipull/pull.go @@ -2,7 +2,9 @@ package ocipull import ( "context" + "errors" "fmt" + "io/fs" "os" "github.com/containers/buildah/pkg/parse" @@ -13,6 +15,7 @@ import ( "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/sirupsen/logrus" ) // PullOptions includes data to alter certain knobs when pulling a source @@ -26,8 +29,17 @@ type PullOptions struct { Quiet bool } +var ( + // noSignaturePolicy is a default policy if policy.json is not found on + // the host machine. + noSignaturePolicy string = `{"default":[{"type":"insecureAcceptAnything"}]}` +) + // Pull `imageInput` from a container registry to `sourcePath`. 
func Pull(ctx context.Context, imageInput types.ImageReference, localDestPath *define.VMFile, options *PullOptions) error { + var ( + policy *signature.Policy + ) destRef, err := layout.ParseReference(localDestPath.GetPath()) if err != nil { return err @@ -44,10 +56,28 @@ func Pull(ctx context.Context, imageInput types.ImageReference, localDestPath *d sysCtx.DockerAuthConfig = authConf } - policy, err := signature.DefaultPolicy(sysCtx) - if err != nil { - return fmt.Errorf("obtaining default signature policy: %w", err) + // Policy paths returns a slice of directories where the policy.json + // may live. Iterate those directories and try to see if any are + // valid ignoring when the file does not exist + for _, path := range policyPaths() { + policy, err = signature.NewPolicyFromFile(path) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + continue + } + return fmt.Errorf("reading signature policy: %w", err) + } } + + // If no policy has been found yet, we use a no signature policy automatically + if policy == nil { + logrus.Debug("no signature policy file found: using default allow everything signature policy") + policy, err = signature.NewPolicyFromBytes([]byte(noSignaturePolicy)) + if err != nil { + return fmt.Errorf("obtaining signature policy: %w", err) + } + } + policyContext, err := signature.NewPolicyContext(policy) if err != nil { return fmt.Errorf("creating new signature policy context: %w", err) diff --git a/pkg/machine/ocipull/source.go b/pkg/machine/ocipull/source.go index 91eb67fbc4..a3d7125335 100644 --- a/pkg/machine/ocipull/source.go +++ b/pkg/machine/ocipull/source.go @@ -107,9 +107,9 @@ func GetDiskArtifactReference(ctx context.Context, imgSrc types.ImageSource, opt // podman-machine-images should have a original file name // stored in the annotations under org.opencontainers.image.title // i.e. 
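// For illustration only: a standalone sketch of the policy fallback described above,
// using the same containers/image signature calls. Unlike the loop in Pull(), this
// sketch stops at the first readable policy file; the embedded JSON matches the
// shipped insecureAcceptAnything default.
package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/containers/image/v5/signature"
)

func loadPolicy(candidates []string) (*signature.Policy, error) {
	for _, p := range candidates {
		policy, err := signature.NewPolicyFromFile(p)
		if err == nil {
			return policy, nil
		}
		if errors.Is(err, fs.ErrNotExist) {
			continue // try the next candidate location
		}
		return nil, fmt.Errorf("reading signature policy: %w", err)
	}
	// no policy.json found anywhere: fall back to allowing everything
	return signature.NewPolicyFromBytes([]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`))
}

func main() {
	policy, err := loadPolicy([]string{"/etc/containers/policy.json"})
	fmt.Println(policy != nil, err)
}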
fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz - originalFileName, ok := v1MannyFest.Layers[0].Annotations["org.opencontainers.image.title"] + originalFileName, ok := v1MannyFest.Layers[0].Annotations[specV1.AnnotationTitle] if !ok { - return "", fmt.Errorf("unable to determine original artifact name: missing required annotation 'org.opencontainers.image.title'") + return "", fmt.Errorf("unable to determine original artifact name: missing required annotation '%s'", specV1.AnnotationTitle) } logrus.Debugf("original artifact file name: %s", originalFileName) return artifactDigest, err diff --git a/pkg/machine/options_darwin.go b/pkg/machine/options_darwin.go deleted file mode 100644 index 3959175d2c..0000000000 --- a/pkg/machine/options_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -package machine - -import "os" - -func getRuntimeDir() (string, error) { - tmpDir, ok := os.LookupEnv("TMPDIR") - if !ok { - tmpDir = "/tmp" - } - return tmpDir, nil -} diff --git a/pkg/machine/options_freebsd.go b/pkg/machine/options_freebsd.go deleted file mode 100644 index 3959175d2c..0000000000 --- a/pkg/machine/options_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -package machine - -import "os" - -func getRuntimeDir() (string, error) { - tmpDir, ok := os.LookupEnv("TMPDIR") - if !ok { - tmpDir = "/tmp" - } - return tmpDir, nil -} diff --git a/pkg/machine/options_linux.go b/pkg/machine/options_linux.go deleted file mode 100644 index 5ba8280008..0000000000 --- a/pkg/machine/options_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package machine - -import ( - "github.com/containers/podman/v5/pkg/rootless" - "github.com/containers/podman/v5/pkg/util" -) - -func getRuntimeDir() (string, error) { - if !rootless.IsRootless() { - return "/run", nil - } - return util.GetRootlessRuntimeDir() -} diff --git a/pkg/machine/options_windows.go b/pkg/machine/options_windows.go deleted file mode 100644 index 1a880069c6..0000000000 --- a/pkg/machine/options_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package machine - -import "os" - -func getRuntimeDir() (string, error) { - tmpDir, ok := os.LookupEnv("TEMP") - if !ok { - tmpDir = os.Getenv("LOCALAPPDATA") + "\\Temp" - } - return tmpDir, nil -} diff --git a/pkg/machine/os/machine_os.go b/pkg/machine/os/machine_os.go index b77164fb98..6ff55fa92b 100644 --- a/pkg/machine/os/machine_os.go +++ b/pkg/machine/os/machine_os.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/shim" "github.com/containers/podman/v5/pkg/machine/vmconfigs" ) @@ -27,7 +28,7 @@ func (m *MachineOS) Apply(image string, opts ApplyOptions) error { return err } - dirs, err := machine.GetMachineDirs(m.Provider.VMType()) + dirs, err := env.GetMachineDirs(m.Provider.VMType()) if err != nil { return err } diff --git a/pkg/machine/ports.go b/pkg/machine/ports/ports.go similarity index 96% rename from pkg/machine/ports.go rename to pkg/machine/ports/ports.go index 2837d492f6..e9e9afa2b4 100644 --- a/pkg/machine/ports.go +++ b/pkg/machine/ports/ports.go @@ -1,4 +1,4 @@ -package machine +package ports import ( "context" @@ -11,6 +11,7 @@ import ( "path/filepath" "strconv" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" @@ -135,7 +136,7 @@ func getRandomPortHold() (io.Closer, int, error) { } func acquirePortLock() (*lockfile.LockFile, error) { - lockDir, err := GetGlobalDataDir() + lockDir, 
err := env.GetGlobalDataDir() if err != nil { return nil, err } @@ -150,7 +151,7 @@ func acquirePortLock() (*lockfile.LockFile, error) { } func loadPortAllocations() (map[int]struct{}, error) { - portDir, err := GetGlobalDataDir() + portDir, err := env.GetGlobalDataDir() if err != nil { return nil, err } @@ -186,7 +187,7 @@ func loadPortAllocations() (map[int]struct{}, error) { } func storePortAllocations(ports map[int]struct{}) error { - portDir, err := GetGlobalDataDir() + portDir, err := env.GetGlobalDataDir() if err != nil { return err } diff --git a/pkg/machine/ports_unix.go b/pkg/machine/ports/ports_unix.go similarity index 98% rename from pkg/machine/ports_unix.go rename to pkg/machine/ports/ports_unix.go index 38cad8948a..20188d3d78 100644 --- a/pkg/machine/ports_unix.go +++ b/pkg/machine/ports/ports_unix.go @@ -1,6 +1,6 @@ //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd -package machine +package ports import ( "net" diff --git a/pkg/machine/ports_windows.go b/pkg/machine/ports/ports_windows.go similarity index 98% rename from pkg/machine/ports_windows.go rename to pkg/machine/ports/ports_windows.go index 730d4be0b3..abfb4592e1 100644 --- a/pkg/machine/ports_windows.go +++ b/pkg/machine/ports/ports_windows.go @@ -1,4 +1,4 @@ -package machine +package ports import ( "net" diff --git a/pkg/machine/provider/platform.go b/pkg/machine/provider/platform.go index c280df9634..40cf79053a 100644 --- a/pkg/machine/provider/platform.go +++ b/pkg/machine/provider/platform.go @@ -3,7 +3,9 @@ package provider import ( + "errors" "fmt" + "io/fs" "os" "github.com/containers/common/pkg/config" @@ -30,8 +32,40 @@ func Get() (vmconfigs.VMProvider, error) { logrus.Debugf("Using Podman machine with `%s` virtualization provider", resolvedVMType.String()) switch resolvedVMType { case define.QemuVirt: - return new(qemu.QEMUStubber), nil + return qemu.NewStubber() default: return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String()) } } + +func GetAll(_ bool) ([]vmconfigs.VMProvider, error) { + return []vmconfigs.VMProvider{new(qemu.QEMUStubber)}, nil +} + +// SupportedProviders returns the providers that are supported on the host operating system +func SupportedProviders() []define.VMType { + return []define.VMType{define.QemuVirt} +} + +// InstalledProviders returns the supported providers that are installed on the host +func InstalledProviders() ([]define.VMType, error) { + cfg, err := config.Default() + if err != nil { + return nil, err + } + _, err = cfg.FindHelperBinary(qemu.QemuCommand, true) + if errors.Is(err, fs.ErrNotExist) { + return []define.VMType{}, nil + } + if err != nil { + return nil, err + } + + return []define.VMType{define.QemuVirt}, nil +} + +// HasPermsForProvider returns whether the host operating system has the proper permissions to use the given provider +func HasPermsForProvider(provider define.VMType) bool { + // there are no permissions required for QEMU + return provider == define.QemuVirt +} diff --git a/pkg/machine/provider/platform_darwin.go b/pkg/machine/provider/platform_darwin.go index 3bed439884..0c76421546 100644 --- a/pkg/machine/provider/platform_darwin.go +++ b/pkg/machine/provider/platform_darwin.go @@ -1,12 +1,18 @@ package provider import ( + "bytes" "fmt" "os" + "os/exec" + "runtime" + "strings" + "github.com/blang/semver/v4" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/pkg/machine/applehv" "github.com/containers/podman/v5/pkg/machine/define" + 
"github.com/containers/podman/v5/pkg/machine/libkrun" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/sirupsen/logrus" ) @@ -29,7 +35,94 @@ func Get() (vmconfigs.VMProvider, error) { switch resolvedVMType { case define.AppleHvVirt: return new(applehv.AppleHVStubber), nil + case define.LibKrun: + return new(libkrun.LibKrunStubber), nil default: return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String()) } } + +func GetAll(_ bool) ([]vmconfigs.VMProvider, error) { + return []vmconfigs.VMProvider{ + new(applehv.AppleHVStubber), + new(libkrun.LibKrunStubber), + }, nil +} + +// SupportedProviders returns the providers that are supported on the host operating system +func SupportedProviders() []define.VMType { + supported := []define.VMType{define.AppleHvVirt} + if runtime.GOARCH == "arm64" { + return append(supported, define.LibKrun) + } + return supported +} + +// InstalledProviders returns the supported providers that are installed on the host +func InstalledProviders() ([]define.VMType, error) { + installed := []define.VMType{} + + appleHvInstalled, err := appleHvInstalled() + if err != nil { + return nil, err + } + if appleHvInstalled { + installed = append(installed, define.AppleHvVirt) + } + + libKrunInstalled, err := libKrunInstalled() + if err != nil { + return nil, err + } + if libKrunInstalled { + installed = append(installed, define.LibKrun) + } + + return installed, nil +} + +func appleHvInstalled() (bool, error) { + var outBuf bytes.Buffer + // Apple's Virtualization.Framework is only supported on MacOS 11.0+, + // but to use EFI MacOS 13.0+ is required + expectedVer, err := semver.Make("13.0.0") + if err != nil { + return false, err + } + + cmd := exec.Command("sw_vers", "--productVersion") + cmd.Stdout = &outBuf + if err := cmd.Run(); err != nil { + return false, fmt.Errorf("unable to check current macOS version using `sw_vers --productVersion`: %s", err) + } + + // the output will be in the format of MAJOR.MINOR.PATCH + output := strings.TrimSuffix(outBuf.String(), "\n") + currentVer, err := semver.Make(output) + if err != nil { + return false, err + } + + return currentVer.GTE(expectedVer), nil +} + +func libKrunInstalled() (bool, error) { + if runtime.GOARCH != "arm64" { + return false, nil + } + + // need to verify that krunkit, virglrenderer, and libkrun-efi are installed + cfg, err := config.Default() + if err != nil { + return false, err + } + + _, err = cfg.FindHelperBinary("krunkit", false) + return err == nil, nil +} + +// HasPermsForProvider returns whether the host operating system has the proper permissions to use the given provider +func HasPermsForProvider(provider define.VMType) bool { + // there are no permissions required for AppleHV or LibKrun + return provider == define.AppleHvVirt || provider == define.LibKrun +} diff --git a/pkg/machine/provider/platform_test.go b/pkg/machine/provider/platform_test.go new file mode 100644 index 0000000000..241aa322b2 --- /dev/null +++ b/pkg/machine/provider/platform_test.go @@ -0,0 +1,87 @@ +package provider + +import ( + "runtime" + "testing" + + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/stretchr/testify/assert" +) + +func TestSupportedProviders(t *testing.T) { + switch runtime.GOOS { + case "darwin": + if runtime.GOARCH == "arm64" { + assert.Equal(t, []define.VMType{define.AppleHvVirt, define.LibKrun}, SupportedProviders()) + } else { + assert.Equal(t, []define.VMType{define.AppleHvVirt}, SupportedProviders()) + } + case "windows": + 
assert.Equal(t, []define.VMType{define.WSLVirt, define.HyperVVirt}, SupportedProviders()) + case "linux": + assert.Equal(t, []define.VMType{define.QemuVirt}, SupportedProviders()) + } +} + +func TestInstalledProviders(t *testing.T) { + installed, err := InstalledProviders() + assert.Nil(t, err) + switch runtime.GOOS { + case "darwin": + // TODO: need to verify if an arm64 machine reports {applehv, libkrun} + assert.Equal(t, []define.VMType{define.AppleHvVirt}, installed) + case "windows": + provider, err := Get() + assert.Nil(t, err) + assert.Contains(t, installed, provider) + case "linux": + assert.Equal(t, []define.VMType{define.QemuVirt}, installed) + } +} + +func TestHasPermsForProvider(t *testing.T) { + provider, err := Get() + assert.Nil(t, err) + assert.True(t, HasPermsForProvider(provider.VMType())) +} + +func TestHasBadPerms(t *testing.T) { + switch runtime.GOOS { + case "darwin": + assert.False(t, HasPermsForProvider(define.QemuVirt)) + case "windows": + assert.False(t, HasPermsForProvider(define.QemuVirt)) + case "linux": + assert.False(t, HasPermsForProvider(define.AppleHvVirt)) + } +} + +func TestBadSupportedProviders(t *testing.T) { + switch runtime.GOOS { + case "darwin": + assert.NotEqual(t, []define.VMType{define.QemuVirt}, SupportedProviders()) + if runtime.GOARCH != "arm64" { + assert.NotEqual(t, []define.VMType{define.AppleHvVirt, define.LibKrun}, SupportedProviders()) + } + case "windows": + assert.NotEqual(t, []define.VMType{define.QemuVirt}, SupportedProviders()) + case "linux": + assert.NotEqual(t, []define.VMType{define.AppleHvVirt}, SupportedProviders()) + } +} + +func TestBadInstalledProviders(t *testing.T) { + installed, err := InstalledProviders() + assert.Nil(t, err) + switch runtime.GOOS { + case "darwin": + assert.NotEqual(t, []define.VMType{define.QemuVirt}, installed) + if runtime.GOARCH != "arm64" { + assert.NotEqual(t, []define.VMType{define.AppleHvVirt, define.LibKrun}, installed) + } + case "windows": + assert.NotContains(t, installed, define.QemuVirt) + case "linux": + assert.NotEqual(t, []define.VMType{define.AppleHvVirt}, installed) + } +} diff --git a/pkg/machine/provider/platform_windows.go b/pkg/machine/provider/platform_windows.go index 943a66926c..772693669d 100644 --- a/pkg/machine/provider/platform_windows.go +++ b/pkg/machine/provider/platform_windows.go @@ -4,8 +4,10 @@ import ( "fmt" "os" + "github.com/containers/libhvee/pkg/hypervctl" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/pkg/machine/wsl" + "github.com/containers/podman/v5/pkg/machine/wsl/wutil" "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/pkg/machine/define" @@ -32,8 +34,58 @@ func Get() (vmconfigs.VMProvider, error) { case define.WSLVirt: return new(wsl.WSLStubber), nil case define.HyperVVirt: + if !wsl.HasAdminRights() { + return nil, fmt.Errorf("hyperv machines require admin authority") + } return new(hyperv.HyperVStubber), nil default: return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String()) } } + +func GetAll(force bool) ([]vmconfigs.VMProvider, error) { + providers := []vmconfigs.VMProvider{ + new(wsl.WSLStubber), + } + if !wsl.HasAdminRights() && !force { + logrus.Warn("managing hyperv machines require admin authority.") + } else { + providers = append(providers, new(hyperv.HyperVStubber)) + } + return providers, nil +} + +// SupportedProviders returns the providers that are supported on the host operating system +func SupportedProviders() []define.VMType { + return 
[]define.VMType{define.HyperVVirt, define.WSLVirt} +} + +// InstalledProviders returns the supported providers that are installed on the host +func InstalledProviders() ([]define.VMType, error) { + installed := []define.VMType{} + if wutil.IsWSLInstalled() { + installed = append(installed, define.WSLVirt) + } + + service, err := hypervctl.NewLocalHyperVService() + if err == nil { + installed = append(installed, define.HyperVVirt) + } + service.Close() + + return installed, nil +} + +// HasPermsForProvider returns whether the host operating system has the proper permissions to use the given provider +func HasPermsForProvider(provider define.VMType) bool { + switch provider { + case define.QemuVirt: + fallthrough + case define.AppleHvVirt: + return false + case define.HyperVVirt: + return wsl.HasAdminRights() + } + + return true +} diff --git a/pkg/machine/proxyenv/env.go b/pkg/machine/proxyenv/env.go new file mode 100644 index 0000000000..1b358ad72a --- /dev/null +++ b/pkg/machine/proxyenv/env.go @@ -0,0 +1,57 @@ +package proxyenv + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" + "github.com/sirupsen/logrus" +) + +const proxySetupScriptTemplate = `#!/bin/bash + +SYSTEMD_CONF=/etc/systemd/system.conf.d/default-env.conf +ENVD_CONF=/etc/environment.d/default-env.conf +PROFILE_CONF=/etc/profile.d/default-env.sh + +mkdir -p /etc/profile.d /etc/environment.d /etc/systemd/system.conf.d/ +rm -f $SYSTEMD_CONF $ENVD_CONF $PROFILE_CONF + +echo "[Manager]" >> $SYSTEMD_CONF +for proxy in %s; do + printf "DefaultEnvironment=%%q\n" "$proxy" >> $SYSTEMD_CONF + printf "%%q\n" "$proxy" >> $ENVD_CONF + printf "export %%q\n" "$proxy" >> $PROFILE_CONF +done + +systemctl daemon-reload +` + +func getProxyScript(isWSL bool) io.Reader { + var envs []string + for _, key := range config.ProxyEnv { + if value, ok := os.LookupEnv(key); ok { + // WSL does not use host.containers.internal as valid name for the VM. 
+ if !isWSL { + value = strings.ReplaceAll(value, "127.0.0.1", etchosts.HostContainersInternal) + value = strings.ReplaceAll(value, "localhost", etchosts.HostContainersInternal) + } + // %q to quote the value correctly + envs = append(envs, fmt.Sprintf("%q", key+"="+value)) + } + } + + script := fmt.Sprintf(proxySetupScriptTemplate, strings.Join(envs, " ")) + logrus.Tracef("Final environment variable setup script: %s", script) + return strings.NewReader(script) +} + +func ApplyProxies(mc *vmconfigs.MachineConfig) error { + return machine.CommonSSHWithStdin("root", mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, []string{"/usr/bin/bash"}, + getProxyScript(mc.WSLHypervisor != nil)) +} diff --git a/pkg/machine/pull.go b/pkg/machine/pull.go index cba6466ee1..932c46a941 100644 --- a/pkg/machine/pull.go +++ b/pkg/machine/pull.go @@ -15,6 +15,7 @@ import ( "github.com/containers/podman/v5/pkg/machine/compression" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/ocipull" "github.com/containers/podman/v5/utils" "github.com/sirupsen/logrus" @@ -31,11 +32,11 @@ func NewGenericDownloader(vmType define.VMType, vmName, pullPath string) (Distri var ( imageName string ) - dataDir, err := GetDataDir(vmType) + dataDir, err := env.GetDataDir(vmType) if err != nil { return nil, err } - cacheDir, err := GetCacheDir(vmType) + cacheDir, err := env.GetCacheDir(vmType) if err != nil { return nil, err } diff --git a/pkg/machine/qemu/command/command.go b/pkg/machine/qemu/command/command.go index b795d73b41..11994f85f1 100644 --- a/pkg/machine/qemu/command/command.go +++ b/pkg/machine/qemu/command/command.go @@ -10,7 +10,9 @@ import ( "strconv" "time" + "github.com/containers/common/pkg/strongunits" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/storage/pkg/fileutils" ) // defaultQMPTimeout is the timeout duration for the @@ -32,8 +34,11 @@ func NewQemuBuilder(binary string, options []string) QemuCmd { } // SetMemory adds the specified amount of memory for the machine -func (q *QemuCmd) SetMemory(m uint64) { - *q = append(*q, "-m", strconv.FormatUint(m, 10)) +func (q *QemuCmd) SetMemory(m strongunits.MiB) { + serializedMem := strconv.FormatUint(uint64(m), 10) + // In order to use virtiofsd, we must enable shared memory + *q = append(*q, "-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%sM,share=on", serializedMem)) + *q = append(*q, "-m", serializedMem) } // SetCPUs adds the number of CPUs the machine will have @@ -52,10 +57,16 @@ func (q *QemuCmd) SetQmpMonitor(monitor Monitor) { } // SetNetwork adds a network device to the machine -func (q *QemuCmd) SetNetwork() { +func (q *QemuCmd) SetNetwork(vlanSocket *define.VMFile) error { // Right now the mac address is hardcoded so that the host networking gives it a specific IP address. 
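// To make the proxy rewrite concrete, a minimal sketch (the proxy value is assumed)
// of what getProxyScript() above does to each host proxy variable before it is written
// into the VM's systemd, environment.d, and profile.d files on non-WSL providers;
// host.containers.internal is the host alias resolvable from inside the VM.
package main

import (
	"fmt"
	"strings"
)

func main() {
	const hostAlias = "host.containers.internal" // value of etchosts.HostContainersInternal
	value := "http://127.0.0.1:3128"             // assumed HTTP_PROXY on the host
	value = strings.ReplaceAll(value, "127.0.0.1", hostAlias)
	value = strings.ReplaceAll(value, "localhost", hostAlias)
	// %q quoting matches how each KEY=value pair is embedded in the setup script
	fmt.Printf("%q\n", "HTTP_PROXY="+value) // "HTTP_PROXY=http://host.containers.internal:3128"
}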
This is // why we can only run one vm at a time right now - *q = append(*q, "-netdev", "socket,id=vlan,fd=3", "-device", "virtio-net-pci,netdev=vlan,mac=5a:94:ef:e4:0c:ee") + *q = append(*q, "-netdev", socketVlanNetdev(vlanSocket.GetPath())) + *q = append(*q, "-device", "virtio-net-pci,netdev=vlan,mac=5a:94:ef:e4:0c:ee") + return nil +} + +func socketVlanNetdev(path string) string { + return fmt.Sprintf("stream,id=vlan,server=off,addr.type=unix,addr.path=%s", path) } // SetNetwork adds a network device to the machine @@ -88,15 +99,6 @@ func (q *QemuCmd) SetSerialPort(readySocket, vmPidFile define.VMFile, name strin "-pidfile", vmPidFile.GetPath()) } -// SetVirtfsMount adds a virtfs mount to the machine -func (q *QemuCmd) SetVirtfsMount(source, tag, securityModel string, readonly bool) { - virtfsOptions := fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=%s", source, tag, securityModel) - if readonly { - virtfsOptions += ",readonly" - } - *q = append(*q, "-virtfs", virtfsOptions) -} - // SetBootableImage specifies the image the machine will use to boot func (q *QemuCmd) SetBootableImage(image string) { *q = append(*q, "-drive", "if=virtio,file="+image) @@ -107,11 +109,6 @@ func (q *QemuCmd) SetDisplay(display string) { *q = append(*q, "-display", display) } -// SetPropagatedHostEnvs adds options that propagate SSL and proxy settings -func (q *QemuCmd) SetPropagatedHostEnvs() { - *q = PropagateHostEnv(*q) -} - func (q *QemuCmd) Build() []string { return *q } @@ -127,7 +124,7 @@ type Monitor struct { // NewQMPMonitor creates the monitor subsection of our vm func NewQMPMonitor(name string, machineRuntimeDir *define.VMFile) (Monitor, error) { - if _, err := os.Stat(machineRuntimeDir.GetPath()); errors.Is(err, fs.ErrNotExist) { + if err := fileutils.Exists(machineRuntimeDir.GetPath()); errors.Is(err, fs.ErrNotExist) { if err := os.MkdirAll(machineRuntimeDir.GetPath(), 0755); err != nil { return Monitor{}, err } diff --git a/pkg/machine/qemu/command/command_test.go b/pkg/machine/qemu/command/command_test.go deleted file mode 100644 index 9967d850db..0000000000 --- a/pkg/machine/qemu/command/command_test.go +++ /dev/null @@ -1,96 +0,0 @@ -//go:build !darwin - -package command - -import ( - "encoding/base64" - "fmt" - "strings" - "testing" - - "github.com/containers/common/libnetwork/etchosts" - "github.com/containers/podman/v5/pkg/machine/define" - "github.com/stretchr/testify/assert" -) - -func TestPropagateHostEnv(t *testing.T) { - tests := map[string]struct { - value string - expect string - }{ - "HTTP_PROXY": { - "proxy", - "equal", - }, - "ftp_proxy": { - "domain.com:8888", - "equal", - }, - "FTP_PROXY": { - "proxy", - "equal", - }, - "NO_PROXY": { - "localaddress", - "equal", - }, - "HTTPS_PROXY": { - "", - "unset", - }, - "no_proxy": { - "", - "unset", - }, - "http_proxy": { - "127.0.0.1:8888", - fmt.Sprintf("%s:8888", etchosts.HostContainersInternal), - }, - "https_proxy": { - "localhost:8888", - fmt.Sprintf("%s:8888", etchosts.HostContainersInternal), - }, - "SSL_CERT_FILE": { - "/some/f=oo.cert", - fmt.Sprintf("%s/f=oo.cert", define.UserCertsTargetPath), - }, - "SSL_CERT_DIR": { - "/some/my/certs", - define.UserCertsTargetPath, - }, - } - - for key, item := range tests { - t.Setenv(key, item.value) - } - - cmdLine := PropagateHostEnv(make([]string, 0)) - - assert.Len(t, cmdLine, 2) - assert.Equal(t, "-fw_cfg", cmdLine[0]) - tokens := strings.Split(cmdLine[1], ",string=") - decodeString, err := base64.StdEncoding.DecodeString(tokens[1]) - assert.NoError(t, err) - - // envsRawArr 
looks like: ["BAR=\"bar\"", "FOO=\"foo\""] - envsRawArr := strings.Split(string(decodeString), "|") - // envs looks like: {"BAR": "bar", "FOO": "foo"} - envs := make(map[string]string) - for _, env := range envsRawArr { - key, value, _ := strings.Cut(env, "=") - envs[key] = strings.Trim(value, "\"") - } - - for key, test := range tests { - switch test.expect { - case "equal": - assert.Equal(t, envs[key], test.value) - case "unset": - if _, ok := envs[key]; ok { - t.Errorf("env %s should not be set", key) - } - default: - assert.Equal(t, envs[key], test.expect) - } - } -} diff --git a/pkg/machine/qemu/command/helpers.go b/pkg/machine/qemu/command/helpers.go deleted file mode 100644 index c763770b9b..0000000000 --- a/pkg/machine/qemu/command/helpers.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build !darwin - -package command - -import ( - "encoding/base64" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/common/libnetwork/etchosts" - "github.com/containers/common/pkg/config" - "github.com/containers/podman/v5/pkg/machine/define" -) - -func GetProxyVariables() map[string]string { - proxyOpts := make(map[string]string) - for _, variable := range config.ProxyEnv { - if value, ok := os.LookupEnv(variable); ok { - if value == "" { - continue - } - - v := strings.ReplaceAll(value, "127.0.0.1", etchosts.HostContainersInternal) - v = strings.ReplaceAll(v, "localhost", etchosts.HostContainersInternal) - proxyOpts[variable] = v - } - } - return proxyOpts -} - -// PropagateHostEnv is here for providing the ability to propagate -// proxy and SSL settings (e.g. HTTP_PROXY and others) on a start -// and avoid a need of re-creating/re-initiating a VM -func PropagateHostEnv(cmdLine QemuCmd) QemuCmd { - varsToPropagate := make([]string, 0) - - for k, v := range GetProxyVariables() { - varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", k, v)) - } - - if sslCertFile, ok := os.LookupEnv("SSL_CERT_FILE"); ok { - pathInVM := filepath.Join(define.UserCertsTargetPath, filepath.Base(sslCertFile)) - varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_FILE", pathInVM)) - } - - if _, ok := os.LookupEnv("SSL_CERT_DIR"); ok { - varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_DIR", define.UserCertsTargetPath)) - } - - if len(varsToPropagate) > 0 { - prefix := "name=opt/com.coreos/environment,string=" - envVarsJoined := strings.Join(varsToPropagate, "|") - fwCfgArg := prefix + base64.StdEncoding.EncodeToString([]byte(envVarsJoined)) - return append(cmdLine, "-fw_cfg", fwCfgArg) - } - - return cmdLine -} diff --git a/pkg/machine/qemu/command/qemu_command_test.go b/pkg/machine/qemu/command/qemu_command_test.go index eae29bd527..0c70f4af02 100644 --- a/pkg/machine/qemu/command/qemu_command_test.go +++ b/pkg/machine/qemu/command/qemu_command_test.go @@ -21,6 +21,9 @@ func TestQemuCmd(t *testing.T) { readySocket, err := define.NewMachineFile(t.TempDir()+"readySocket.sock", nil) assert.NoError(t, err) + vlanSocket, err := define.NewMachineFile(t.TempDir()+"vlanSocket.sock", nil) + assert.NoError(t, err) + vmPidFile, err := define.NewMachineFile(t.TempDir()+"vmpidfile.pid", nil) assert.NoError(t, err) @@ -32,6 +35,7 @@ func TestQemuCmd(t *testing.T) { ignPath := ignFile.GetPath() addrFilePath := machineAddrFile.GetPath() readySocketPath := readySocket.GetPath() + vlanSocketPath := vlanSocket.GetPath() vmPidFilePath := vmPidFile.GetPath() bootableImagePath := t.TempDir() + "test-machine_fedora-coreos-38.20230918.2.0-qemu.x86_64.qcow2" @@ -40,25 +44,26 @@ 
func TestQemuCmd(t *testing.T) { cmd.SetCPUs(4) cmd.SetIgnitionFile(*ignFile) cmd.SetQmpMonitor(monitor) - cmd.SetNetwork() + err = cmd.SetNetwork(vlanSocket) + assert.NoError(t, err) cmd.SetSerialPort(*readySocket, *vmPidFile, "test-machine") - cmd.SetVirtfsMount("/tmp/path", "vol10", "none", true) cmd.SetBootableImage(bootableImagePath) cmd.SetDisplay("none") expected := []string{ "/usr/bin/qemu-system-x86_64", + "-object", + "memory-backend-memfd,id=mem,size=2048M,share=on", "-m", "2048", "-smp", "4", "-fw_cfg", fmt.Sprintf("name=opt/com.coreos/config,file=%s", ignPath), "-qmp", fmt.Sprintf("unix:%s,server=on,wait=off", addrFilePath), - "-netdev", "socket,id=vlan,fd=3", + "-netdev", socketVlanNetdev(vlanSocketPath), "-device", "virtio-net-pci,netdev=vlan,mac=5a:94:ef:e4:0c:ee", "-device", "virtio-serial", "-chardev", fmt.Sprintf("socket,path=%s,server=on,wait=off,id=atest-machine_ready", readySocketPath), "-device", "virtserialport,chardev=atest-machine_ready,name=org.fedoraproject.port.0", "-pidfile", vmPidFilePath, - "-virtfs", "local,path=/tmp/path,mount_tag=vol10,security_model=none,readonly", "-drive", fmt.Sprintf("if=virtio,file=%s", bootableImagePath), "-display", "none"} diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go index 19e8368325..64fb04a73d 100644 --- a/pkg/machine/qemu/machine.go +++ b/pkg/machine/qemu/machine.go @@ -1,4 +1,4 @@ -//go:build !darwin +//go:build linux || freebsd package qemu @@ -16,15 +16,17 @@ import ( "time" "github.com/containers/common/pkg/config" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/pkg/machine/vmconfigs" + "github.com/containers/storage/pkg/fileutils" "github.com/digitalocean/go-qemu/qmp" "github.com/sirupsen/logrus" ) -const ( - MountType9p = "9p" -) +func NewStubber() (*QEMUStubber, error) { + return &QEMUStubber{}, nil +} // qemuPid returns -1 or the PID of the running QEMU instance. func qemuPid(pidFile *define.VMFile) (int, error) { @@ -113,9 +115,6 @@ func (q *QEMUStubber) waitForMachineToStop(mc *vmconfigs.MachineConfig) error { // Stop uses the qmp monitor to call a system_powerdown func (q *QEMUStubber) StopVM(mc *vmconfigs.MachineConfig, _ bool) error { - mc.Lock() - defer mc.Unlock() - if err := mc.Refresh(); err != nil { return err } @@ -149,7 +148,7 @@ func (q *QEMUStubber) StopVM(mc *vmconfigs.MachineConfig, _ bool) error { // stopLocked stops the machine and expects the caller to hold the machine's lock. func (q *QEMUStubber) stopLocked(mc *vmconfigs.MachineConfig) error { // check if the qmp socket is there. 
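// Summary sketch (values assumed, matching the 2048 MiB machine in the test above) of
// the memory flags the new SetMemory() emits; the paired "-M ...,memory-backend=mem"
// machine option is added by the options_*.go changes that follow. The shared memfd
// backing is what lets virtiofsd's vhost-user-fs devices access guest memory.
package main

import (
	"fmt"
	"strconv"
)

func memoryArgs(mib uint64) []string {
	size := strconv.FormatUint(mib, 10)
	return []string{
		"-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%sM,share=on", size),
		"-m", size,
	}
}

func main() {
	fmt.Println(memoryArgs(2048))
	// amd64 then adds:   -M memory-backend=mem
	// aarch64 then adds: -M virt,gic-version=max,memory-backend=mem
}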
if not, qemu instance is gone - if _, err := os.Stat(mc.QEMUHypervisor.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { + if err := fileutils.Exists(mc.QEMUHypervisor.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { // Right now it is NOT an error to stop a stopped machine logrus.Debugf("QMP monitor socket %v does not exist", mc.QEMUHypervisor.QMPMonitor.Address) // Fix incorrect starting state in case of crash during start @@ -228,22 +227,27 @@ func (q *QEMUStubber) stopLocked(mc *vmconfigs.MachineConfig) error { // Remove deletes all the files associated with a machine including the image itself func (q *QEMUStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, error) { - mc.Lock() - defer mc.Unlock() - qemuRmFiles := []string{ mc.QEMUHypervisor.QEMUPidPath.GetPath(), mc.QEMUHypervisor.QMPMonitor.Address.GetPath(), } return qemuRmFiles, func() error { - return nil + var errs []error + if err := mc.QEMUHypervisor.QEMUPidPath.Delete(); err != nil { + errs = append(errs, err) + } + + if err := mc.QEMUHypervisor.QMPMonitor.Address.Delete(); err != nil { + errs = append(errs, err) + } + return errorhandling.JoinErrors(errs) }, nil } func (q *QEMUStubber) State(mc *vmconfigs.MachineConfig, bypass bool) (define.Status, error) { // Check if qmp socket path exists - if _, err := os.Stat(mc.QEMUHypervisor.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { + if err := fileutils.Exists(mc.QEMUHypervisor.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { return define.Stopped, nil } if err := mc.Refresh(); err != nil { diff --git a/pkg/machine/qemu/options_linux_amd64.go b/pkg/machine/qemu/options_linux_amd64.go index 9e8c680ab0..67b6c61fee 100644 --- a/pkg/machine/qemu/options_linux_amd64.go +++ b/pkg/machine/qemu/options_linux_amd64.go @@ -10,6 +10,7 @@ func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string { opts := []string{ "-accel", "kvm", "-cpu", "host", + "-M", "memory-backend=mem", } return opts } diff --git a/pkg/machine/qemu/options_linux_arm64.go b/pkg/machine/qemu/options_linux_arm64.go index 63dcd8d81d..09be86a62d 100644 --- a/pkg/machine/qemu/options_linux_arm64.go +++ b/pkg/machine/qemu/options_linux_arm64.go @@ -3,8 +3,9 @@ package qemu import ( - "os" "path/filepath" + + "github.com/containers/storage/pkg/fileutils" ) var ( @@ -15,7 +16,7 @@ func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string { opts := []string{ "-accel", "kvm", "-cpu", "host", - "-M", "virt,gic-version=max", + "-M", "virt,gic-version=max,memory-backend=mem", "-bios", getQemuUefiFile("QEMU_EFI.fd"), } return opts @@ -27,7 +28,7 @@ func getQemuUefiFile(name string) string { "/usr/share/edk2/aarch64", } for _, dir := range dirs { - if _, err := os.Stat(dir); err == nil { + if err := fileutils.Exists(dir); err == nil { return filepath.Join(dir, name) } } diff --git a/pkg/machine/qemu/options_windows_amd64.go b/pkg/machine/qemu/options_windows_amd64.go index 6e0ba0271a..1a929a5f32 100644 --- a/pkg/machine/qemu/options_windows_amd64.go +++ b/pkg/machine/qemu/options_windows_amd64.go @@ -1,4 +1,4 @@ -//go:build windows && amd64 +//go:build tempoff package qemu diff --git a/pkg/machine/qemu/stubber.go b/pkg/machine/qemu/stubber.go index bcb8803e59..576d30bbcc 100644 --- a/pkg/machine/qemu/stubber.go +++ b/pkg/machine/qemu/stubber.go @@ -1,4 +1,4 @@ -//go:build !darwin +//go:build linux || freebsd package qemu @@ -7,7 +7,6 @@ import ( "bytes" "errors" "fmt" - "net" "os" "os/exec" "path/filepath" @@ -32,8 +31,16 @@ type 
QEMUStubber struct { vmconfigs.QEMUConfig // Command describes the final QEMU command line Command command.QemuCmd + + // virtiofsHelpers are virtiofsd child processes + virtiofsHelpers []virtiofsdHelperCmd } +var ( + gvProxyWaitBackoff = 500 * time.Millisecond + gvProxyMaxBackoffAttempts = 6 +) + func (q QEMUStubber) UserModeNetworkEnabled(*vmconfigs.MachineConfig) bool { return true } @@ -70,15 +77,14 @@ func (q *QEMUStubber) setQEMUCommandLine(mc *vmconfigs.MachineConfig) error { q.Command.SetCPUs(mc.Resources.CPUs) q.Command.SetIgnitionFile(*ignitionFile) q.Command.SetQmpMonitor(mc.QEMUHypervisor.QMPMonitor) - q.Command.SetNetwork() - q.Command.SetSerialPort(*readySocket, *mc.QEMUHypervisor.QEMUPidPath, mc.Name) - - // Add volumes to qemu command line - for _, mount := range mc.Mounts { - // the index provided in this case is thrown away - _, _, _, _, securityModel := vmconfigs.SplitVolume(0, mount.OriginalInput) - q.Command.SetVirtfsMount(mount.Source, mount.Tag, securityModel, mount.ReadOnly) + gvProxySock, err := mc.GVProxySocket() + if err != nil { + return err + } + if err := q.Command.SetNetwork(gvProxySock); err != nil { + return err } + q.Command.SetSerialPort(*readySocket, *mc.QEMUHypervisor.QEMUPidPath, mc.Name) q.Command.SetUSBHostPassthrough(mc.Resources.USBs) @@ -106,7 +112,7 @@ func (q *QEMUStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineCo mc.QEMUHypervisor = &qemuConfig mc.QEMUHypervisor.QEMUPidPath = qemuPidPath - return q.resizeDisk(strongunits.GiB(mc.Resources.DiskSize), mc.ImagePath) + return q.resizeDisk(mc.Resources.DiskSize, mc.ImagePath) } func runStartVMCommand(cmd *exec.Cmd) error { @@ -136,9 +142,6 @@ func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() return nil, nil, fmt.Errorf("unable to generate qemu command line: %q", err) } - defaultBackoff := 500 * time.Millisecond - maxBackoffs := 6 - readySocket, err := mc.ReadySocket() if err != nil { return nil, nil, err @@ -149,17 +152,10 @@ func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() return nil, nil, err } - qemuNetdevSockConn, err := sockets.DialSocketWithBackoffs(maxBackoffs, defaultBackoff, gvProxySock.GetPath()) - if err != nil { - return nil, nil, fmt.Errorf("failed to connect to gvproxy socket: %w", err) - } - defer qemuNetdevSockConn.Close() - - fd, err := qemuNetdevSockConn.(*net.UnixConn).File() - if err != nil { + // Wait on gvproxy to be running and aware + if err := sockets.WaitForSocketWithBackoffs(gvProxyMaxBackoffAttempts, gvProxyWaitBackoff, gvProxySock.GetPath(), "gvproxy"); err != nil { return nil, nil, err } - defer fd.Close() dnr, dnw, err := machine.GetDevNullFiles() if err != nil { @@ -168,9 +164,25 @@ func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() defer dnr.Close() defer dnw.Close() - cmdLine := q.Command + runtime, err := mc.RuntimeDir() + if err != nil { + return nil, nil, err + } + spawner, err := newVirtiofsdSpawner(runtime) + if err != nil { + return nil, nil, err + } - cmdLine.SetPropagatedHostEnvs() + for _, hostmnt := range mc.Mounts { + qemuArgs, virtiofsdHelper, err := spawner.spawnForMount(hostmnt) + if err != nil { + return nil, nil, fmt.Errorf("failed to init virtiofsd for mount %s: %w", hostmnt.Source, err) + } + q.Command = append(q.Command, qemuArgs...) 
+ q.virtiofsHelpers = append(q.virtiofsHelpers, *virtiofsdHelper) + } + + cmdLine := q.Command // Disable graphic window when not in debug mode // Done in start, so we're not suck with the debug level we used on init @@ -184,12 +196,11 @@ func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() // actually run the command that starts the virtual machine cmd := &exec.Cmd{ - Args: cmdLine, - Path: cmdLine[0], - Stdin: dnr, - Stdout: dnw, - Stderr: stderrBuf, - ExtraFiles: []*os.File{fd}, + Args: cmdLine, + Path: cmdLine[0], + Stdin: dnr, + Stdout: dnw, + Stderr: stderrBuf, } if err := runStartVMCommand(cmd); err != nil { @@ -201,8 +212,20 @@ func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() return waitForReady(readySocket, cmd.Process.Pid, stderrBuf) } + releaseFunc := func() error { + if err := cmd.Process.Release(); err != nil { + return err + } + for _, virtiofsdCmd := range q.virtiofsHelpers { + if err := virtiofsdCmd.command.Process.Release(); err != nil { + return err + } + } + return nil + } + // if this is not the last line in the func, make it a defer - return cmd.Process.Release, readyFunc, nil + return releaseFunc, readyFunc, nil } func waitForReady(readySocket *define.VMFile, pid int, stdErrBuffer *bytes.Buffer) error { @@ -255,9 +278,6 @@ func (q *QEMUStubber) resizeDisk(newSize strongunits.GiB, diskPath *define.VMFil } func (q *QEMUStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.SetOptions) error { - mc.Lock() - defer mc.Unlock() - state, err := q.State(mc, false) if err != nil { return err @@ -319,7 +339,7 @@ func (q *QEMUStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) // create mountpoint directory if it doesn't exist // because / is immutable, we have to monkey around with permissions // if we dont mount in /home or /mnt - args := []string{"-q", "--"} + var args []string if !strings.HasPrefix(mount.Target, "/home") && !strings.HasPrefix(mount.Target, "/mnt") { args = append(args, "sudo", "chattr", "-i", "/", ";") } @@ -331,33 +351,42 @@ func (q *QEMUStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) if err != nil { return err } - switch mount.Type { - case MountType9p: - mountOptions := []string{"-t", "9p"} - mountOptions = append(mountOptions, []string{"-o", "trans=virtio", mount.Tag, mount.Target}...) - mountOptions = append(mountOptions, []string{"-o", "version=9p2000.L,msize=131072,cache=mmap"}...) - if mount.ReadOnly { - mountOptions = append(mountOptions, []string{"-o", "ro"}...) - } - err = machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, append([]string{"-q", "--", "sudo", "mount"}, mountOptions...)) - if err != nil { - return err - } - default: - return fmt.Errorf("unknown mount type: %s", mount.Type) + // NOTE: The mount type q.Type was previously serialized as 9p for older Linux versions, + // but we ignore it now because we want the mount type to be dynamic, not static. Or + // in other words we don't want to make people unnecessarily reprovision their machines + // to upgrade from 9p to virtiofs. + mountOptions := []string{"-t", "virtiofs"} + mountOptions = append(mountOptions, []string{mount.Tag, mount.Target}...) 
+ mountFlags := fmt.Sprintf("context=\"%s\"", machine.NFSSELinuxContext) + if mount.ReadOnly { + mountFlags += ",ro" + } + mountOptions = append(mountOptions, "-o", mountFlags) + err = machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, append([]string{"sudo", "mount"}, mountOptions...)) + if err != nil { + return err } } return nil } func (q *QEMUStubber) MountType() vmconfigs.VolumeMountType { - return vmconfigs.NineP + return vmconfigs.VirtIOFS } func (q *QEMUStubber) PostStartNetworking(mc *vmconfigs.MachineConfig, noInfo bool) error { return nil } +func (q *QEMUStubber) UpdateSSHPort(mc *vmconfigs.MachineConfig, port int) error { + // managed by gvproxy on this backend, so nothing to do + return nil +} + func (q *QEMUStubber) GetDisk(userInputPath string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { return diskpull.GetDisk(userInputPath, dirs, mc.ImagePath, q.VMType(), mc.Name) } + +func (q *QEMUStubber) GetRosetta(mc *vmconfigs.MachineConfig) (bool, error) { + return false, nil +} diff --git a/pkg/machine/qemu/virtiofsd.go b/pkg/machine/qemu/virtiofsd.go new file mode 100644 index 0000000000..fe2c697006 --- /dev/null +++ b/pkg/machine/qemu/virtiofsd.go @@ -0,0 +1,99 @@ +//go:build linux || freebsd + +package qemu + +import ( + "fmt" + "os" + "os/exec" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" + "github.com/containers/storage/pkg/fileutils" + "github.com/sirupsen/logrus" +) + +// VirtiofsdSpawner spawns an instance of virtiofsd +type virtiofsdSpawner struct { + runtimeDir *define.VMFile + binaryPath string + mountCount uint +} + +func newVirtiofsdSpawner(runtimeDir *define.VMFile) (*virtiofsdSpawner, error) { + var binaryPath string + cfg, err := config.Default() + if err != nil { + return nil, err + } + binaryPath, err = cfg.FindHelperBinary("virtiofsd", true) + if err != nil { + return nil, fmt.Errorf("failed to find virtiofsd: %w", err) + } + return &virtiofsdSpawner{binaryPath: binaryPath, runtimeDir: runtimeDir}, nil +} + +// createVirtiofsCmd returns a new command instance configured to launch virtiofsd. +func (v *virtiofsdSpawner) createVirtiofsCmd(directory, socketPath string) *exec.Cmd { + args := []string{"--sandbox", "none", "--socket-path", socketPath, "--shared-dir", "."} + // We don't need seccomp filtering; we trust our workloads. This incidentally + // works around issues like https://gitlab.com/virtio-fs/virtiofsd/-/merge_requests/200. + args = append(args, "--seccomp=none") + cmd := exec.Command(v.binaryPath, args...) + // This sets things up so that the `.` we passed in the arguments is the target directory + cmd.Dir = directory + // Quiet the daemon by default + cmd.Env = append(cmd.Environ(), "RUST_LOG=ERROR") + cmd.Stdin = nil + cmd.Stdout = nil + if logrus.IsLevelEnabled(logrus.DebugLevel) { + cmd.Stderr = os.Stderr + } + return cmd +} + +type virtiofsdHelperCmd struct { + command exec.Cmd + socket *define.VMFile +} + +// spawnForMount returns on success a combination of qemu commandline and child process for virtiofsd +func (v *virtiofsdSpawner) spawnForMount(hostmnt *vmconfigs.Mount) ([]string, *virtiofsdHelperCmd, error) { + logrus.Debugf("Initializing virtiofsd mount for %s", hostmnt.Source) + // By far the most common failure to spawn virtiofsd will be a typo'd source directory, + // so let's synchronously check that ourselves here. 
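+ // A missing or mistyped source directory therefore fails fast here instead of surfacing as an opaque virtiofsd startup error.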
+ if err := fileutils.Exists(hostmnt.Source); err != nil { + return nil, nil, fmt.Errorf("failed to access virtiofs source directory %s", hostmnt.Source) + } + virtiofsChar := fmt.Sprintf("virtiofschar%d", v.mountCount) + virtiofsCharPath, err := v.runtimeDir.AppendToNewVMFile(virtiofsChar, nil) + if err != nil { + return nil, nil, err + } + + qemuCommand := []string{} + + qemuCommand = append(qemuCommand, "-chardev", fmt.Sprintf("socket,id=%s,path=%s", virtiofsChar, virtiofsCharPath.Path)) + qemuCommand = append(qemuCommand, "-device", fmt.Sprintf("vhost-user-fs-pci,queue-size=1024,chardev=%s,tag=%s", virtiofsChar, hostmnt.Tag)) + // TODO: Honor hostmnt.readonly somehow here (add an option to virtiofsd) + virtiofsdCmd := v.createVirtiofsCmd(hostmnt.Source, virtiofsCharPath.Path) + if err := virtiofsdCmd.Start(); err != nil { + return nil, nil, fmt.Errorf("failed to start virtiofsd") + } + // Wait for the socket + for { + if err := fileutils.Exists(virtiofsCharPath.Path); err == nil { + break + } + logrus.Debugf("waiting for virtiofsd socket %q", virtiofsCharPath.Path) + time.Sleep(time.Millisecond * 100) + } + // Increment our count of mounts which are used to create unique names for the devices + v.mountCount += 1 + return qemuCommand, &virtiofsdHelperCmd{ + command: *virtiofsdCmd, + socket: virtiofsCharPath, + }, nil +} diff --git a/pkg/machine/shim/claim_unsupported.go b/pkg/machine/shim/claim_unsupported.go index 0fc9403002..d9485df3df 100644 --- a/pkg/machine/shim/claim_unsupported.go +++ b/pkg/machine/shim/claim_unsupported.go @@ -2,18 +2,24 @@ package shim +func findClaimHelper() string { + return "" +} + +// All of these are unused on Windows but are used on Linux. +// So we're just silencing Windows lint warnings here. + +//nolint:unused func dockerClaimHelperInstalled() bool { return false } +//nolint:unused func claimDockerSock() bool { return false } +//nolint:unused func dockerClaimSupported() bool { return false } - -func findClaimHelper() string { - return "" -} diff --git a/pkg/machine/shim/diskpull/diskpull.go b/pkg/machine/shim/diskpull/diskpull.go index ce768ad593..83526d3f5b 100644 --- a/pkg/machine/shim/diskpull/diskpull.go +++ b/pkg/machine/shim/diskpull/diskpull.go @@ -15,12 +15,12 @@ func GetDisk(userInputPath string, dirs *define.MachineDirs, imagePath *define.V mydisk ocipull.Disker ) - if userInputPath == "" { - mydisk, err = ocipull.NewOCIArtifactPull(context.Background(), dirs, name, vmType, imagePath) + if userInputPath == "" || strings.HasPrefix(userInputPath, "docker://") { + mydisk, err = ocipull.NewOCIArtifactPull(context.Background(), dirs, userInputPath, name, vmType, imagePath) } else { if strings.HasPrefix(userInputPath, "http") { // TODO probably should use tempdir instead of datadir - mydisk, err = stdpull.NewDiskFromURL(userInputPath, imagePath, dirs.DataDir, nil) + mydisk, err = stdpull.NewDiskFromURL(userInputPath, imagePath, dirs.DataDir, nil, false) } else { mydisk, err = stdpull.NewStdDiskPull(userInputPath, imagePath) } diff --git a/pkg/machine/shim/host.go b/pkg/machine/shim/host.go index 03a979af2d..e1c1834f60 100644 --- a/pkg/machine/shim/host.go +++ b/pkg/machine/shim/host.go @@ -1,17 +1,22 @@ package shim import ( + "bufio" "errors" "fmt" "os" "path/filepath" "runtime" + "strings" "time" "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/connection" machineDefine "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" 
"github.com/containers/podman/v5/pkg/machine/ignition" + "github.com/containers/podman/v5/pkg/machine/lock" + "github.com/containers/podman/v5/pkg/machine/proxyenv" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/utils" "github.com/hashicorp/go-multierror" @@ -26,7 +31,7 @@ func List(vmstubbers []vmconfigs.VMProvider, _ machine.ListOptions) ([]*machine. ) for _, s := range vmstubbers { - dirs, err := machine.GetMachineDirs(s.VMType()) + dirs, err := env.GetMachineDirs(s.VMType()) if err != nil { return nil, err } @@ -62,34 +67,41 @@ func List(vmstubbers []vmconfigs.VMProvider, _ machine.ListOptions) ([]*machine. return lrs, nil } -func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.MachineConfig, error) { +func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) error { var ( err error imageExtension string imagePath *machineDefine.VMFile ) - callbackFuncs := machine.InitCleanup() + callbackFuncs := machine.CleanUp() defer callbackFuncs.CleanIfErr(&err) go callbackFuncs.CleanOnSignal() - dirs, err := machine.GetMachineDirs(mp.VMType()) + dirs, err := env.GetMachineDirs(mp.VMType()) if err != nil { - return nil, err + return err } - sshIdentityPath, err := machine.GetSSHIdentityPath(machineDefine.DefaultIdentityName) + sshIdentityPath, err := env.GetSSHIdentityPath(machineDefine.DefaultIdentityName) if err != nil { - return nil, err + return err } sshKey, err := machine.GetSSHKeys(sshIdentityPath) if err != nil { - return nil, err + return err } - mc, err := vmconfigs.NewMachineConfig(opts, dirs, sshIdentityPath, mp.VMType()) + machineLock, err := lock.GetMachineLock(opts.Name, dirs.ConfigDir.GetPath()) if err != nil { - return nil, err + return err + } + machineLock.Lock() + defer machineLock.Unlock() + + mc, err := vmconfigs.NewMachineConfig(opts, dirs, sshIdentityPath, mp.VMType(), machineLock) + if err != nil { + return err } mc.Version = vmconfigs.MachineConfigVersion @@ -114,17 +126,19 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M switch mp.VMType() { case machineDefine.QemuVirt: imageExtension = ".qcow2" - case machineDefine.AppleHvVirt: + case machineDefine.AppleHvVirt, machineDefine.LibKrun: imageExtension = ".raw" case machineDefine.HyperVVirt: imageExtension = ".vhdx" + case machineDefine.WSLVirt: + imageExtension = "" default: - // do nothing + return fmt.Errorf("unknown VM type: %s", mp.VMType()) } imagePath, err = dirs.DataDir.AppendToNewVMFile(fmt.Sprintf("%s-%s%s", opts.Name, runtime.GOARCH, imageExtension), nil) if err != nil { - return nil, err + return err } mc.ImagePath = imagePath @@ -137,8 +151,8 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M // "/path // "docker://quay.io/something/someManifest - if err := mp.GetDisk(opts.ImagePath, dirs, mc); err != nil { - return nil, err + if err := mp.GetDisk(opts.Image, dirs, mc); err != nil { + return err } callbackFuncs.Add(mc.ImagePath.Delete) @@ -147,7 +161,7 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M ignitionFile, err := mc.IgnitionFile() if err != nil { - return nil, err + return err } uid := os.Getuid() @@ -180,22 +194,22 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M // copy it into the conf dir if len(opts.IgnitionPath) > 0 { err = ignBuilder.BuildWithIgnitionFile(opts.IgnitionPath) - return nil, err + return err } err = ignBuilder.GenerateIgnitionConfig() if err != nil { - return nil, err + 
return err } readyIgnOpts, err := mp.PrepareIgnition(mc, &ignBuilder) if err != nil { - return nil, err + return err } readyUnitFile, err := ignition.CreateReadyUnitFile(mp.VMType(), readyIgnOpts) if err != nil { - return nil, err + return err } readyUnit := ignition.Unit{ @@ -212,7 +226,7 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M // TODO AddSSHConnectionToPodmanSocket could take an machineconfig instead if err := connection.AddSSHConnectionsToPodmanSocket(mc.HostUser.UID, mc.SSH.Port, mc.SSH.IdentityPath, mc.Name, mc.SSH.RemoteUsername, opts); err != nil { - return nil, err + return err } cleanup := func() error { @@ -222,15 +236,15 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M err = mp.CreateVM(createOpts, mc, &ignBuilder) if err != nil { - return nil, err + return err } err = ignBuilder.Build() if err != nil { - return nil, err + return err } - return mc, err + return mc.Write() } // VMExists looks across given providers for a machine's existence. returns the actual config and found bool @@ -256,13 +270,8 @@ func VMExists(name string, vmstubbers []vmconfigs.VMProvider) (*vmconfigs.Machin return nil, false, nil } -// CheckExclusiveActiveVM checks if any of the machines are already running -func CheckExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.MachineConfig) error { - // Don't check if provider supports parallel running machines - if !provider.RequireExclusiveActive() { - return nil - } - +// checkExclusiveActiveVM checks if any of the machines are already running +func checkExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.MachineConfig) error { // Check if any other machines are running; if so, we error localMachines, err := getMCsOverProviders([]vmconfigs.VMProvider{provider}) if err != nil { @@ -273,8 +282,8 @@ func CheckExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.Machine if err != nil { return err } - if state == machineDefine.Running { - return fmt.Errorf("unable to start %q: machine %s already running", mc.Name, name) + if state == machineDefine.Running || state == machineDefine.Starting { + return fmt.Errorf("unable to start %q: machine %s: %w", mc.Name, name, machineDefine.ErrVMAlreadyRunning) } } return nil @@ -285,7 +294,7 @@ func CheckExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.Machine func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]*vmconfigs.MachineConfig, error) { mcs := make(map[string]*vmconfigs.MachineConfig) for _, stubber := range vmstubbers { - dirs, err := machine.GetMachineDirs(stubber.VMType()) + dirs, err := env.GetMachineDirs(stubber.VMType()) if err != nil { return nil, err } @@ -306,10 +315,20 @@ func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]*vmconfi } // Stop stops the machine as well as supporting binaries/processes -// TODO: I think this probably needs to go somewhere that remove can call it. func Stop(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, hardStop bool) error { // state is checked here instead of earlier because stopping a stopped vm is not considered // an error. so putting in one place instead of sprinkling all over. + mc.Lock() + defer mc.Unlock() + if err := mc.Refresh(); err != nil { + return fmt.Errorf("reload config: %w", err) + } + + return stopLocked(mc, mp, dirs, hardStop) +} + +// stopLocked stops the machine and expects the caller to hold the machine's lock. 
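+// Stop acquires the machine lock and refreshes the config before delegating here, so callers that already hold the lock (such as Remove in this change) can reuse the same shutdown path.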
+func stopLocked(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, hardStop bool) error { state, err := mp.State(mc, false) if err != nil { return err @@ -342,26 +361,84 @@ func Stop(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDef if err != nil { return err } - - defer func() { - if err := machine.CleanupGVProxy(*gvproxyPidFile); err != nil { - logrus.Errorf("unable to clean up gvproxy: %q", err) - } - }() + if err := machine.CleanupGVProxy(*gvproxyPidFile); err != nil { + return fmt.Errorf("unable to clean up gvproxy: %w", err) + } } - return nil + // Update last time up + mc.LastUp = time.Now() + return mc.Write() } -func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, _ *machineDefine.MachineDirs, opts machine.StartOptions) error { +func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, opts machine.StartOptions) error { defaultBackoff := 500 * time.Millisecond maxBackoffs := 6 + mc.Lock() + defer mc.Unlock() + if err := mc.Refresh(); err != nil { + return fmt.Errorf("reload config: %w", err) + } + + // Don't check if provider supports parallel running machines + if mp.RequireExclusiveActive() { + startLock, err := lock.GetMachineStartLock() + if err != nil { + return err + } + startLock.Lock() + defer startLock.Unlock() + + if err := checkExclusiveActiveVM(mp, mc); err != nil { + return err + } + } else { + // still should make sure we do not start the same machine twice + state, err := mp.State(mc, false) + if err != nil { + return err + } + + if state == machineDefine.Running || state == machineDefine.Starting { + return fmt.Errorf("machine %s: %w", mc.Name, machineDefine.ErrVMAlreadyRunning) + } + } + + // Set starting to true + mc.Starting = true + if err := mc.Write(); err != nil { + logrus.Error(err) + } + // Set starting to false on exit + defer func() { + mc.Starting = false + if err := mc.Write(); err != nil { + logrus.Error(err) + } + }() + + gvproxyPidFile, err := dirs.RuntimeDir.AppendToNewVMFile("gvproxy.pid", nil) + if err != nil { + return err + } + // start gvproxy and set up the API socket forwarding forwardSocketPath, forwardingState, err := startNetworking(mc, mp) if err != nil { return err } + + callBackFuncs := machine.CleanUp() + defer callBackFuncs.CleanIfErr(&err) + go callBackFuncs.CleanOnSignal() + + // Clean up gvproxy if start fails + cleanGV := func() error { + return machine.CleanupGVProxy(*gvproxyPidFile) + } + callBackFuncs.Add(cleanGV) + // if there are generic things that need to be done, a preStart function could be added here // should it be extensive @@ -414,6 +491,10 @@ func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, _ *machineDefin return errors.New(msg) } + if err := proxyenv.ApplyProxies(mc); err != nil { + return err + } + // mount the volumes to the VM if err := mp.MountVolumesToVM(mc, opts.Quiet); err != nil { return err @@ -450,38 +531,168 @@ func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, _ *machineDefin return nil } -func Reset(dirs *machineDefine.MachineDirs, mp vmconfigs.VMProvider, mcs map[string]*vmconfigs.MachineConfig) error { - var resetErrors *multierror.Error - for _, mc := range mcs { - err := Stop(mc, mp, dirs, true) +func Set(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, opts machineDefine.SetOptions) error { + mc.Lock() + defer mc.Unlock() + + if err := mc.Refresh(); err != nil { + return fmt.Errorf("reload config: %w", err) + } + + if opts.CPUs != nil { + 
mc.Resources.CPUs = *opts.CPUs + } + + if opts.Memory != nil { + mc.Resources.Memory = *opts.Memory + } + + if opts.DiskSize != nil { + if *opts.DiskSize <= mc.Resources.DiskSize { + return fmt.Errorf("new disk size must be larger than %d GB", mc.Resources.DiskSize) + } + mc.Resources.DiskSize = *opts.DiskSize + } + + if err := mp.SetProviderAttrs(mc, opts); err != nil { + return err + } + + // Update the configuration file last if everything earlier worked + return mc.Write() +} + +func Remove(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, opts machine.RemoveOptions) error { + mc.Lock() + defer mc.Unlock() + if err := mc.Refresh(); err != nil { + return fmt.Errorf("reload config: %w", err) + } + + state, err := mp.State(mc, false) + if err != nil { + return err + } + + if state == machineDefine.Running { + if !opts.Force { + return &machineDefine.ErrVMRunningCannotDestroyed{Name: mc.Name} + } + } + + rmFiles, genericRm, err := mc.Remove(opts.SaveIgnition, opts.SaveImage) + if err != nil { + return err + } + + providerFiles, providerRm, err := mp.Remove(mc) + if err != nil { + return err + } + + // Add provider specific files to the list + rmFiles = append(rmFiles, providerFiles...) + + // Important! + // Nothing can be removed at this point. The user can still opt out below + // + + if !opts.Force { + // Warn user + confirmationMessage(rmFiles) + reader := bufio.NewReader(os.Stdin) + fmt.Print("Are you sure you want to continue? [y/N] ") + answer, err := reader.ReadString('\n') if err != nil { - resetErrors = multierror.Append(resetErrors, err) + return err + } + if strings.ToLower(answer)[0] != 'y' { + return nil + } + } + + if state == machineDefine.Running { + if err := stopLocked(mc, mp, dirs, true); err != nil { + return err } - _, genericRm, err := mc.Remove(false, false) + } + + // + // All actual removal of files and vms should occur after this + // + + if err := providerRm(); err != nil { + logrus.Errorf("failed to remove virtual machine from provider for %q: %v", mc.Name, err) + } + + if err := genericRm(); err != nil { + return fmt.Errorf("failed to remove machines files: %v", err) + } + return nil +} + +func confirmationMessage(files []string) { + fmt.Printf("The following files will be deleted:\n\n\n") + for _, msg := range files { + fmt.Println(msg) + } +} + +func Reset(mps []vmconfigs.VMProvider, opts machine.ResetOptions) error { + var resetErrors *multierror.Error + removeDirs := []*machineDefine.MachineDirs{} + + for _, p := range mps { + d, err := env.GetMachineDirs(p.VMType()) if err != nil { resetErrors = multierror.Append(resetErrors, err) + continue } - _, providerRm, err := mp.Remove(mc) + mcs, err := vmconfigs.LoadMachinesInDir(d) if err != nil { resetErrors = multierror.Append(resetErrors, err) + continue } + removeDirs = append(removeDirs, d) - if err := genericRm(); err != nil { - resetErrors = multierror.Append(resetErrors, err) - } - if err := providerRm(); err != nil { - resetErrors = multierror.Append(resetErrors, err) + for _, mc := range mcs { + err := Stop(mc, p, d, true) + if err != nil { + resetErrors = multierror.Append(resetErrors, err) + } + _, genericRm, err := mc.Remove(false, false) + if err != nil { + resetErrors = multierror.Append(resetErrors, err) + } + _, providerRm, err := p.Remove(mc) + if err != nil { + resetErrors = multierror.Append(resetErrors, err) + } + + if err := genericRm(); err != nil { + resetErrors = multierror.Append(resetErrors, err) + } + if err := providerRm(); err != nil { + 
resetErrors = multierror.Append(resetErrors, err) + } } } // Delete the various directories + // We do this after all the provider rm's, since providers may still share the base machine dir. // Note: we cannot delete the machine run dir blindly like this because // other things live there like the podman.socket and so forth. - - // in linux this ~/.local/share/containers/podman/machine - dataDirErr := utils.GuardedRemoveAll(filepath.Dir(dirs.DataDir.GetPath())) - // in linux this ~/.config/containers/podman/machine - confDirErr := utils.GuardedRemoveAll(filepath.Dir(dirs.ConfigDir.GetPath())) - resetErrors = multierror.Append(resetErrors, confDirErr, dataDirErr) + for _, dir := range removeDirs { + // in linux this ~/.local/share/containers/podman/machine + dataDirErr := utils.GuardedRemoveAll(filepath.Dir(dir.DataDir.GetPath())) + if !errors.Is(dataDirErr, os.ErrNotExist) { + resetErrors = multierror.Append(resetErrors, dataDirErr) + } + // in linux this ~/.config/containers/podman/machine + confDirErr := utils.GuardedRemoveAll(filepath.Dir(dir.ConfigDir.GetPath())) + if !errors.Is(confDirErr, os.ErrNotExist) { + resetErrors = multierror.Append(resetErrors, confDirErr) + } + } return resetErrors.ErrorOrNil() } diff --git a/pkg/machine/shim/networking.go b/pkg/machine/shim/networking.go index 98c3bbe61d..d7ebd8e7d7 100644 --- a/pkg/machine/shim/networking.go +++ b/pkg/machine/shim/networking.go @@ -11,7 +11,10 @@ import ( "github.com/containers/common/pkg/config" gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/connection" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/podman/v5/pkg/machine/ports" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/sirupsen/logrus" ) @@ -54,9 +57,7 @@ func startHostForwarder(mc *vmconfigs.MachineConfig, provider vmconfigs.VMProvid runDir := dirs.RuntimeDir cmd.PidFile = filepath.Join(runDir.GetPath(), "gvproxy.pid") - if logrus.IsLevelEnabled(logrus.DebugLevel) { - cmd.LogFile = filepath.Join(runDir.GetPath(), "gvproxy.log") - } + cmd.LogFile = filepath.Join(runDir.GetPath(), "gvproxy.log") cmd.SSHPort = mc.SSH.Port @@ -90,17 +91,25 @@ func startHostForwarder(mc *vmconfigs.MachineConfig, provider vmconfigs.VMProvid } func startNetworking(mc *vmconfigs.MachineConfig, provider vmconfigs.VMProvider) (string, machine.APIForwardingState, error) { + // Check if SSH port is in use, and reassign if necessary + if !ports.IsLocalPortAvailable(mc.SSH.Port) { + logrus.Warnf("detected port conflict on machine ssh port [%d], reassigning", mc.SSH.Port) + if err := reassignSSHPort(mc, provider); err != nil { + return "", 0, err + } + } + // Provider has its own networking code path (e.g. 
WSL) if provider.UseProviderNetworkSetup() { return "", 0, provider.StartNetworking(mc, nil) } - dirs, err := machine.GetMachineDirs(provider.VMType()) + dirs, err := env.GetMachineDirs(provider.VMType()) if err != nil { return "", 0, err } - hostSocks, forwardSock, forwardingState, err := setupMachineSockets(mc.Name, dirs) + hostSocks, forwardSock, forwardingState, err := setupMachineSockets(mc, dirs) if err != nil { return "", 0, err } @@ -152,6 +161,53 @@ func conductVMReadinessCheck(mc *vmconfigs.MachineConfig, maxBackoffs int, backo return } +func reassignSSHPort(mc *vmconfigs.MachineConfig, provider vmconfigs.VMProvider) error { + newPort, err := ports.AllocateMachinePort() + if err != nil { + return err + } + + success := false + defer func() { + if !success { + if err := ports.ReleaseMachinePort(newPort); err != nil { + logrus.Warnf("could not release port allocation as part of failure rollback (%d): %s", newPort, err.Error()) + } + } + }() + + // Write a transient invalid port, to force a retry on failure + oldPort := mc.SSH.Port + mc.SSH.Port = 0 + if err := mc.Write(); err != nil { + return err + } + + if err := ports.ReleaseMachinePort(oldPort); err != nil { + logrus.Warnf("could not release current ssh port allocation (%d): %s", oldPort, err.Error()) + } + + // Update the backend's settings if relevant (e.g. WSL) + if err := provider.UpdateSSHPort(mc, newPort); err != nil { + return err + } + + mc.SSH.Port = newPort + if err := connection.UpdateConnectionPairPort(mc.Name, newPort, mc.HostUser.UID, mc.SSH.RemoteUsername, mc.SSH.IdentityPath); err != nil { + return fmt.Errorf("could not update remote connection configuration: %w", err) + } + + // Write updated port back + if err := mc.Write(); err != nil { + return err + } + + // inform defer routine not to release the port + success = true + + return nil +} + func isListening(port int) bool { // Check if we can dial it conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", "127.0.0.1", port), 10*time.Millisecond) diff --git a/pkg/machine/shim/networking_unix.go b/pkg/machine/shim/networking_unix.go index 869f99d5ed..8912da919b 100644 --- a/pkg/machine/shim/networking_unix.go +++ b/pkg/machine/shim/networking_unix.go @@ -10,63 +10,92 @@ import ( "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/sirupsen/logrus" ) -func setupMachineSockets(name string, dirs *define.MachineDirs) ([]string, string, machine.APIForwardingState, error) { - hostSocket, err := dirs.DataDir.AppendToNewVMFile("podman.sock", nil) +func setupMachineSockets(mc *vmconfigs.MachineConfig, dirs *define.MachineDirs) ([]string, string, machine.APIForwardingState, error) { + hostSocket, err := mc.APISocket() if err != nil { return nil, "", 0, err } - linkSocketPath := filepath.Dir(dirs.DataDir.GetPath()) - linkSocket, err := define.NewMachineFile(filepath.Join(linkSocketPath, "podman.sock"), nil) + forwardSock, state, err := setupForwardingLinks(hostSocket, dirs.DataDir) if err != nil { return nil, "", 0, err } - - forwardSock, state := setupForwardingLinks(hostSocket, linkSocket) return []string{hostSocket.GetPath()}, forwardSock, state, nil } -func setupForwardingLinks(hostSocket, linkSocket *define.VMFile) (string, machine.APIForwardingState) { - // The linking pattern is /var/run/docker.sock -> user global sock (link) -> machine sock (socket) - // This allows the helper to only have to maintain one constant target to the user, which can be - 
// repositioned without updating docker.sock. - +func setupForwardingLinks(hostSocket, dataDir *define.VMFile) (string, machine.APIForwardingState, error) { + // Sets up a cooperative link structure to help a separate privileged + // service manage /var/run/docker.sock (currently only on MacOS via + // podman-mac-helper, but potentially other OSs in the future). + // + // The linking pattern is: + // + // /var/run/docker.sock (link) -> user global sock (link) -> machine sock + // + // This allows the helper to only have to maintain one constant target to + // the user, which can be repositioned without updating docker.sock. + //
+ // Since these link locations are global/shared across multiple machine + // instances, they must coordinate on the winner. The scheme is first come, + // first served: whoever is actively answering on the socket first wins. All + // other machine instances back off. As soon as the winner is no longer + // active, the next machine instance to start becomes the new active winner. + // The same applies to a competing container runtime trying to use + // /var/run/docker.sock: if the socket is in use by another runtime, podman + // machine will back off. In the start message, "losing" machine instances + // will instead advertise the direct machine socket, while "winning" + // instances will simply note that they listen on the standard + // /var/run/docker.sock address. The APIForwardingState return value + // indicates how the start message should behave. + + // Skip any OS not supported for helper usage if !dockerClaimSupported() { - return hostSocket.GetPath(), machine.ClaimUnsupported + return hostSocket.GetPath(), machine.ClaimUnsupported, nil }
+ // Verify the helper system service was installed and report back if not if !dockerClaimHelperInstalled() { - return hostSocket.GetPath(), machine.NotInstalled + return hostSocket.GetPath(), machine.NotInstalled, nil + } + + dataPath := filepath.Dir(dataDir.GetPath()) + userGlobalSocket, err := define.NewMachineFile(filepath.Join(dataPath, "podman.sock"), nil) + if err != nil { + return "", 0, err }
- if !alreadyLinked(hostSocket.GetPath(), linkSocket.GetPath()) { - if checkSockInUse(linkSocket.GetPath()) { - return hostSocket.GetPath(), machine.MachineLocal + // Set up the user global socket if not in use + // (e.g. ~/.local/share/containers/podman/machine/podman.sock) + if !alreadyLinked(hostSocket.GetPath(), userGlobalSocket.GetPath()) { + if checkSockInUse(userGlobalSocket.GetPath()) { + return hostSocket.GetPath(), machine.MachineLocal, nil }
- _ = linkSocket.Delete() + _ = userGlobalSocket.Delete() - if err := os.Symlink(hostSocket.GetPath(), linkSocket.GetPath()); err != nil { + if err := os.Symlink(hostSocket.GetPath(), userGlobalSocket.GetPath()); err != nil { logrus.Warnf("could not create user global API forwarding link: %s", err.Error()) - return hostSocket.GetPath(), machine.MachineLocal + return hostSocket.GetPath(), machine.MachineLocal, nil } }
- if !alreadyLinked(linkSocket.GetPath(), dockerSock) { + // Set up /var/run/docker.sock if not in use + if !alreadyLinked(userGlobalSocket.GetPath(), dockerSock) { if checkSockInUse(dockerSock) { - return hostSocket.GetPath(), machine.MachineLocal + return hostSocket.GetPath(), machine.MachineLocal, nil } if !claimDockerSock() { logrus.Warn("podman helper is installed, but was not able to claim the global docker sock") - return hostSocket.GetPath(), machine.MachineLocal + return hostSocket.GetPath(), machine.MachineLocal, nil } } - return
dockerSock, machine.DockerGlobal + return dockerSock, machine.DockerGlobal, nil } func alreadyLinked(target string, link string) bool { diff --git a/pkg/machine/shim/networking_windows.go b/pkg/machine/shim/networking_windows.go index 970aefd5aa..5428a8db4d 100644 --- a/pkg/machine/shim/networking_windows.go +++ b/pkg/machine/shim/networking_windows.go @@ -5,10 +5,12 @@ import ( "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" ) -func setupMachineSockets(name string, dirs *define.MachineDirs) ([]string, string, machine.APIForwardingState, error) { - machinePipe := machine.ToDist(name) +func setupMachineSockets(mc *vmconfigs.MachineConfig, dirs *define.MachineDirs) ([]string, string, machine.APIForwardingState, error) { + machinePipe := env.WithPodmanPrefix(mc.Name) if !machine.PipeNameAvailable(machinePipe, machine.MachineNameWait) { return nil, "", 0, fmt.Errorf("could not start api proxy since expected pipe is not available: %s", machinePipe) } diff --git a/pkg/machine/sockets/sockets.go b/pkg/machine/sockets/sockets.go index afd6d3a7ae..520dcf9099 100644 --- a/pkg/machine/sockets/sockets.go +++ b/pkg/machine/sockets/sockets.go @@ -9,6 +9,7 @@ import ( "time" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -94,3 +95,18 @@ func DialSocketWithBackoffsAndProcCheck( } return nil, err } + +// WaitForSocketWithBackoffs attempts to discover listening socket in maxBackoffs attempts +func WaitForSocketWithBackoffs(maxBackoffs int, backoff time.Duration, socketPath string, name string) error { + backoffWait := backoff + logrus.Debugf("checking that %q socket is ready", name) + for i := 0; i < maxBackoffs; i++ { + err := fileutils.Exists(socketPath) + if err == nil { + return nil + } + time.Sleep(backoffWait) + backoffWait *= 2 + } + return fmt.Errorf("unable to connect to %q socket at %q", name, socketPath) +} diff --git a/pkg/machine/ssh.go b/pkg/machine/ssh.go index 7a66984cf2..74b811b984 100644 --- a/pkg/machine/ssh.go +++ b/pkg/machine/ssh.go @@ -1,25 +1,115 @@ package machine import ( + "bufio" "fmt" + "io" + "os" "os/exec" "strconv" + "strings" "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh" ) // CommonSSH is a common function for ssh'ing to a podman machine using system-connections // and a port // TODO This should probably be taught about an machineconfig to reduce input func CommonSSH(username, identityPath, name string, sshPort int, inputArgs []string) error { - return commonSSH(username, identityPath, name, sshPort, inputArgs, false) + return commonBuiltinSSH(username, identityPath, name, sshPort, inputArgs, true, os.Stdin) +} + +func CommonSSHShell(username, identityPath, name string, sshPort int, inputArgs []string) error { + return commonNativeSSH(username, identityPath, name, sshPort, inputArgs, os.Stdin) } func CommonSSHSilent(username, identityPath, name string, sshPort int, inputArgs []string) error { - return commonSSH(username, identityPath, name, sshPort, inputArgs, true) + return commonBuiltinSSH(username, identityPath, name, sshPort, inputArgs, false, nil) +} + +func CommonSSHWithStdin(username, identityPath, name string, sshPort int, inputArgs []string, stdin io.Reader) error { + return commonBuiltinSSH(username, identityPath, name, sshPort, inputArgs, true, stdin) } -func commonSSH(username, identityPath, name string, 
sshPort int, inputArgs []string, silent bool) error { +func commonBuiltinSSH(username, identityPath, name string, sshPort int, inputArgs []string, passOutput bool, stdin io.Reader) error { + config, err := createConfig(username, identityPath) + if err != nil { + return err + } + + client, err := ssh.Dial("tcp", fmt.Sprintf("localhost:%d", sshPort), config) + if err != nil { + return err + } + defer client.Close() + + session, err := client.NewSession() + if err != nil { + return err + } + defer session.Close() +
+ cmd := strings.Join(inputArgs, " ") + logrus.Debugf("Running ssh command on machine %q: %s", name, cmd) + session.Stdin = stdin + if passOutput { + session.Stdout = os.Stdout + session.Stderr = os.Stderr + } else if logrus.IsLevelEnabled(logrus.DebugLevel) { + return runSessionWithDebug(session, cmd) + } + + return session.Run(cmd) +} +
+func runSessionWithDebug(session *ssh.Session, cmd string) error { + outPipe, err := session.StdoutPipe() + if err != nil { + return err + } + errPipe, err := session.StderrPipe() + if err != nil { + return err + } + logOutput := func(pipe io.Reader, done chan struct{}) { + scanner := bufio.NewScanner(pipe) + for scanner.Scan() { + logrus.Debugf("ssh output: %s", scanner.Text()) + } + done <- struct{}{} + } + if err := session.Start(cmd); err != nil { + return err + } + completed := make(chan struct{}, 2) + go logOutput(outPipe, completed) + go logOutput(errPipe, completed) + <-completed + <-completed + + return session.Wait() +} +
+func createConfig(user string, identityPath string) (*ssh.ClientConfig, error) { + key, err := os.ReadFile(identityPath) + if err != nil { + return nil, err + } + + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + return nil, err + } + + return &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }, nil +} +
+func commonNativeSSH(username, identityPath, name string, sshPort int, inputArgs []string, stdin io.Reader) error { sshDestination := username + "@localhost" port := strconv.Itoa(sshPort) interactive := true @@ -39,10 +129,8 @@ func commonSSH(username, identityPath, name string, sshPort int, inputArgs []str cmd := exec.Command("ssh", args...)
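// Note: after this change the exec'd ssh binary only backs CommonSSHShell's interactive sessions; non-interactive machine commands go through the built-in golang.org/x/crypto/ssh client (commonBuiltinSSH) above.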
logrus.Debugf("Executing: ssh %v\n", args) - if !silent { - if err := setupIOPassthrough(cmd, interactive); err != nil { - return err - } + if err := setupIOPassthrough(cmd, interactive, stdin); err != nil { + return err } return cmd.Run() diff --git a/pkg/machine/ssh_unix.go b/pkg/machine/ssh_unix.go index 17e5acd06f..620969814d 100644 --- a/pkg/machine/ssh_unix.go +++ b/pkg/machine/ssh_unix.go @@ -3,12 +3,13 @@ package machine import ( + "io" "os" "os/exec" ) -func setupIOPassthrough(cmd *exec.Cmd, interactive bool) error { - cmd.Stdin = os.Stdin +func setupIOPassthrough(cmd *exec.Cmd, interactive bool, stdin io.Reader) error { + cmd.Stdin = stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/machine/ssh_windows.go b/pkg/machine/ssh_windows.go index 3440dc1687..cf44023ff1 100644 --- a/pkg/machine/ssh_windows.go +++ b/pkg/machine/ssh_windows.go @@ -8,8 +8,8 @@ import ( "github.com/sirupsen/logrus" ) -func setupIOPassthrough(cmd *exec.Cmd, interactive bool) error { - cmd.Stdin = os.Stdin +func setupIOPassthrough(cmd *exec.Cmd, interactive bool, stdin io.Reader) error { + cmd.Stdin = stdin if interactive { cmd.Stdout = os.Stdout diff --git a/pkg/machine/stdpull/local.go b/pkg/machine/stdpull/local.go index d4aaa8121c..3ece20bb09 100644 --- a/pkg/machine/stdpull/local.go +++ b/pkg/machine/stdpull/local.go @@ -1,10 +1,9 @@ package stdpull import ( - "os" - "github.com/containers/podman/v5/pkg/machine/compression" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -22,7 +21,7 @@ func NewStdDiskPull(inputPath string, finalpath *define.VMFile) (*StdDiskPull, e } func (s *StdDiskPull) Get() error { - if _, err := os.Stat(s.inputPath.GetPath()); err != nil { + if err := fileutils.Exists(s.inputPath.GetPath()); err != nil { // could not find disk return err } diff --git a/pkg/machine/stdpull/url.go b/pkg/machine/stdpull/url.go index a8cc27a3f8..c724d54ef8 100644 --- a/pkg/machine/stdpull/url.go +++ b/pkg/machine/stdpull/url.go @@ -14,6 +14,7 @@ import ( "github.com/containers/podman/v5/pkg/machine/compression" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/utils" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -21,9 +22,10 @@ type DiskFromURL struct { u *url2.URL finalPath *define.VMFile tempLocation *define.VMFile + cache bool } -func NewDiskFromURL(inputPath string, finalPath *define.VMFile, tempDir *define.VMFile, optionalTempFileName *string) (*DiskFromURL, error) { +func NewDiskFromURL(inputPath string, finalPath *define.VMFile, tempDir *define.VMFile, optionalTempFileName *string, cache bool) (*DiskFromURL, error) { var ( err error ) @@ -33,7 +35,7 @@ func NewDiskFromURL(inputPath string, finalPath *define.VMFile, tempDir *define. } // Make sure the temporary location exists before we get too deep - if _, err := os.Stat(tempDir.GetPath()); err != nil { + if err := fileutils.Exists(tempDir.GetPath()); err != nil { if errors.Is(err, fs.ErrNotExist) { return nil, fmt.Errorf("temporary download directory %s does not exist", tempDir.GetPath()) } @@ -56,6 +58,7 @@ func NewDiskFromURL(inputPath string, finalPath *define.VMFile, tempDir *define. 
u: u, finalPath: finalPath, tempLocation: tempLocation, + cache: cache, }, nil } @@ -64,6 +67,16 @@ func (d *DiskFromURL) Get() error { if err := d.pull(); err != nil { return err } + if !d.cache { + defer func() { + if err := utils.GuardedRemoveAll(d.tempLocation.GetPath()); err != nil { + if !errors.Is(err, os.ErrNotExist) { + logrus.Warn("failed to clean machine image cache: ", err) + } + } + }() + } + logrus.Debugf("decompressing (if needed) %s to %s", d.tempLocation.GetPath(), d.finalPath.GetPath()) return compression.Decompress(d.tempLocation, d.finalPath.GetPath()) } diff --git a/pkg/machine/update.go b/pkg/machine/update.go index e5839c85e7..e910db66ce 100644 --- a/pkg/machine/update.go +++ b/pkg/machine/update.go @@ -15,7 +15,7 @@ func UpdatePodmanDockerSockService(mc *vmconfigs.MachineConfig) error { command := fmt.Sprintf("'echo %q > %s'", content, ignition.PodmanDockerTmpConfPath) args := []string{"sudo", "bash", "-c", command} if err := CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args); err != nil { - logrus.Warnf("Could not not update internal docker sock config") + logrus.Warnf("Could not update internal docker sock config") return err } diff --git a/pkg/machine/vmconfigs/config.go b/pkg/machine/vmconfigs/config.go index 037681391f..aac6726881 100644 --- a/pkg/machine/vmconfigs/config.go +++ b/pkg/machine/vmconfigs/config.go @@ -3,6 +3,7 @@ package vmconfigs import ( "time" + "github.com/containers/common/pkg/strongunits" gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" "github.com/containers/podman/v5/pkg/machine/define" "github.com/containers/podman/v5/pkg/machine/ignition" @@ -32,10 +33,11 @@ type MachineConfig struct { ImagePath *define.VMFile // Temporary only until a proper image struct is worked out // Provider stuff - AppleHypervisor *AppleHVConfig `json:",omitempty"` - QEMUHypervisor *QEMUConfig `json:",omitempty"` - HyperVHypervisor *HyperVConfig `json:",omitempty"` - WSLHypervisor *WSLConfig `json:",omitempty"` + AppleHypervisor *AppleHVConfig `json:",omitempty"` + HyperVHypervisor *HyperVConfig `json:",omitempty"` + LibKrunHypervisor *LibKrunConfig `json:",omitempty"` + QEMUHypervisor *QEMUConfig `json:",omitempty"` + WSLHypervisor *WSLConfig `json:",omitempty"` lock *lockfile.LockFile //nolint:unused @@ -49,6 +51,8 @@ type MachineConfig struct { // Starting is defined as "on" but not fully booted Starting bool + + Rosetta bool } type machineImage interface { //nolint:unused @@ -96,6 +100,8 @@ type VMProvider interface { //nolint:interfacebloat UserModeNetworkEnabled(mc *MachineConfig) bool UseProviderNetworkSetup() bool RequireExclusiveActive() bool + UpdateSSHPort(mc *MachineConfig, port int) error + GetRosetta(mc *MachineConfig) (bool, error) } // HostUser describes the host user @@ -123,9 +129,9 @@ type ResourceConfig struct { // CPUs to be assigned to the VM CPUs uint64 // Disk size in gigabytes assigned to the vm - DiskSize uint64 + DiskSize strongunits.GiB // Memory in megabytes assigned to the vm - Memory uint64 + Memory strongunits.MiB // Usbs USBs []define.USBConfig } diff --git a/pkg/machine/vmconfigs/config_common.go b/pkg/machine/vmconfigs/config_common.go index be885579c5..d12705f946 100644 --- a/pkg/machine/vmconfigs/config_common.go +++ b/pkg/machine/vmconfigs/config_common.go @@ -19,6 +19,7 @@ type QEMUConfig struct { // Stubs type AppleHVConfig struct{} type HyperVConfig struct{} +type LibKrunConfig struct{} type WSLConfig struct{} func getHostUID() int { diff --git 
a/pkg/machine/vmconfigs/config_darwin.go b/pkg/machine/vmconfigs/config_darwin.go index 1d095ba1ce..359c4d392d 100644 --- a/pkg/machine/vmconfigs/config_darwin.go +++ b/pkg/machine/vmconfigs/config_darwin.go @@ -3,12 +3,16 @@ package vmconfigs import ( "os" - "github.com/containers/podman/v5/pkg/machine/applehv/vfkit" + "github.com/containers/podman/v5/pkg/machine/apple/vfkit" ) type AppleHVConfig struct { // The VFKit endpoint where we can interact with the VM - Vfkit vfkit.VfkitHelper + Vfkit vfkit.Helper +} + +type LibKrunConfig struct { + KRun vfkit.Helper } // Stubs diff --git a/pkg/machine/vmconfigs/config_windows.go b/pkg/machine/vmconfigs/config_windows.go index 0562490c7c..be39e2f953 100644 --- a/pkg/machine/vmconfigs/config_windows.go +++ b/pkg/machine/vmconfigs/config_windows.go @@ -18,8 +18,9 @@ type WSLConfig struct { } // Stubs -type QEMUConfig struct{} type AppleHVConfig struct{} +type LibKrunConfig struct{} +type QEMUConfig struct{} func getHostUID() int { return 1000 diff --git a/pkg/machine/vmconfigs/machine.go b/pkg/machine/vmconfigs/machine.go index 81d7b4e9a4..bbd0f6cc9b 100644 --- a/pkg/machine/vmconfigs/machine.go +++ b/pkg/machine/vmconfigs/machine.go @@ -10,11 +10,17 @@ import ( "strings" "time" + "github.com/containers/common/pkg/strongunits" define2 "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/machine/connection" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/lock" - "github.com/containers/podman/v5/utils" + "github.com/containers/podman/v5/pkg/machine/ports" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" ) @@ -39,16 +45,11 @@ var ( type RemoteConnectionType string -// NewMachineConfig creates the initial machine configuration file from cli options -func NewMachineConfig(opts define.InitOptions, dirs *define.MachineDirs, sshIdentityPath string, vmtype define.VMType) (*MachineConfig, error) { +// NewMachineConfig creates the initial machine configuration file from cli options. +func NewMachineConfig(opts define.InitOptions, dirs *define.MachineDirs, sshIdentityPath string, vmtype define.VMType, machineLock *lockfile.LockFile) (*MachineConfig, error) { mc := new(MachineConfig) mc.Name = opts.Name mc.dirs = dirs - - machineLock, err := lock.GetMachineLock(opts.Name, dirs.ConfigDir.GetPath()) - if err != nil { - return nil, err - } mc.lock = machineLock // Assign Dirs @@ -57,6 +58,11 @@ func NewMachineConfig(opts define.InitOptions, dirs *define.MachineDirs, sshIden return nil, err } mc.configPath = cf + // Given that we are locked now and check again that the config file does not exists, + // if it does it means the VM was already created and we should error. 
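+ // (fileutils.Exists follows os.Stat semantics: a nil error means the config file is already present, hence the err == nil check below.)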
+ if err := fileutils.Exists(cf.Path); err == nil { + return nil, fmt.Errorf("%s: %w", opts.Name, define.ErrVMAlreadyExists) + } if vmtype != define.QemuVirt && len(opts.USBs) > 0 { return nil, fmt.Errorf("USB host passthrough not supported for %s machines", vmtype) @@ -70,14 +76,13 @@ func NewMachineConfig(opts define.InitOptions, dirs *define.MachineDirs, sshIden // System Resources mrc := ResourceConfig{ CPUs: opts.CPUS, - DiskSize: opts.DiskSize, - Memory: opts.Memory, + DiskSize: strongunits.GiB(opts.DiskSize), + Memory: strongunits.MiB(opts.Memory), USBs: usbs, } mc.Resources = mrc - // TODO WSL had a locking port mechanism, we should consider this. - sshPort, err := utils.GetRandomPort() + sshPort, err := ports.AllocateMachinePort() if err != nil { return nil, err } @@ -106,13 +111,6 @@ func (mc *MachineConfig) Unlock() { mc.lock.Unlock() } -// Write is a locking way to the machine configuration file -func (mc *MachineConfig) Write() error { - mc.Lock() - defer mc.Unlock() - return mc.write() -} - // Refresh reloads the config file from disk func (mc *MachineConfig) Refresh() error { content, err := os.ReadFile(mc.configPath.GetPath()) @@ -123,7 +121,7 @@ func (mc *MachineConfig) Refresh() error { } // write is a non-locking way to write the machine configuration file to disk -func (mc *MachineConfig) write() error { +func (mc *MachineConfig) Write() error { if mc.configPath == nil { return fmt.Errorf("no configuration file associated with vm %q", mc.Name) } @@ -132,7 +130,7 @@ func (mc *MachineConfig) write() error { return err } logrus.Debugf("writing configuration file %q", mc.configPath.Path) - return os.WriteFile(mc.configPath.GetPath(), b, define.DefaultFilePerm) + return ioutils.AtomicWriteFile(mc.configPath.GetPath(), b, define.DefaultFilePerm) } func (mc *MachineConfig) SetRootful(rootful bool) error { @@ -166,6 +164,16 @@ func (mc *MachineConfig) Remove(saveIgnition, saveImage bool) ([]string, func() return nil, nil, err } + gvProxySocket, err := mc.GVProxySocket() + if err != nil { + return nil, nil, err + } + + apiSocket, err := mc.APISocket() + if err != nil { + return nil, nil, err + } + logPath, err := mc.LogFile() if err != nil { return nil, nil, err @@ -174,6 +182,8 @@ func (mc *MachineConfig) Remove(saveIgnition, saveImage bool) ([]string, func() rmFiles := []string{ mc.configPath.GetPath(), readySocket.GetPath(), + gvProxySocket.GetPath(), + apiSocket.GetPath(), logPath.GetPath(), } if !saveImage { @@ -184,28 +194,43 @@ func (mc *MachineConfig) Remove(saveIgnition, saveImage bool) ([]string, func() } mcRemove := func() error { + var errs []error + if err := connection.RemoveConnections(mc.Name, mc.Name+"-root"); err != nil { + errs = append(errs, err) + } + if !saveIgnition { if err := ignitionFile.Delete(); err != nil { - logrus.Error(err) + errs = append(errs, err) } } if !saveImage { if err := mc.ImagePath.Delete(); err != nil { - logrus.Error(err) + errs = append(errs, err) } } - if err := mc.configPath.Delete(); err != nil { - logrus.Error(err) - } if err := readySocket.Delete(); err != nil { - logrus.Error() + errs = append(errs, err) + } + if err := gvProxySocket.Delete(); err != nil { + errs = append(errs, err) + } + if err := apiSocket.Delete(); err != nil { + errs = append(errs, err) } if err := logPath.Delete(); err != nil { - logrus.Error(err) + errs = append(errs, err) + } + + if err := mc.configPath.Delete(); err != nil { + errs = append(errs, err) + } + + if err := ports.ReleaseMachinePort(mc.SSH.Port); err != nil { + errs = append(errs, err) } - // 
TODO This should be bumped up into delete and called out in the text given then - // are not technically files per'se - return connection.RemoveConnections(mc.Name, mc.Name+"-root") + + return errorhandling.JoinErrors(errs) } return rmFiles, mcRemove, nil @@ -263,6 +288,14 @@ func (mc *MachineConfig) GVProxySocket() (*define.VMFile, error) { return gvProxySocket(mc.Name, machineRuntimeDir) } +func (mc *MachineConfig) APISocket() (*define.VMFile, error) { + machineRuntimeDir, err := mc.RuntimeDir() + if err != nil { + return nil, err + } + return apiSocket(mc.Name, machineRuntimeDir) +} + func (mc *MachineConfig) LogFile() (*define.VMFile, error) { rtDir, err := mc.RuntimeDir() if err != nil { @@ -297,6 +330,22 @@ func (mc *MachineConfig) IsFirstBoot() (bool, error) { return mc.LastUp == never, nil } +func (mc *MachineConfig) ConnectionInfo(vmtype define.VMType) (*define.VMFile, *define.VMFile, error) { + var ( + socket *define.VMFile + pipe *define.VMFile + ) + + if vmtype == define.HyperVVirt || vmtype == define.WSLVirt { + pipeName := env.WithPodmanPrefix(mc.Name) + pipe = &define.VMFile{Path: `\\.\pipe\` + pipeName} + return nil, pipe, nil + } + + socket, err := mc.APISocket() + return socket, nil, err +} + // LoadMachineByName returns a machine config based on the vm name and provider func LoadMachineByName(name string, dirs *define.MachineDirs) (*MachineConfig, error) { fullPath, err := dirs.ConfigDir.AppendToNewVMFile(name+".json", nil) diff --git a/pkg/machine/vmconfigs/sockets.go b/pkg/machine/vmconfigs/sockets.go index df16a44a2e..60d2a7a2a2 100644 --- a/pkg/machine/vmconfigs/sockets.go +++ b/pkg/machine/vmconfigs/sockets.go @@ -15,3 +15,7 @@ func gvProxySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFil func readySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, error) { return machineRuntimeDir.AppendToNewVMFile(name+".sock", nil) } + +func apiSocket(name string, socketDir *define.VMFile) (*define.VMFile, error) { + return socketDir.AppendToNewVMFile(name+"-api.sock", nil) +} diff --git a/pkg/machine/vmconfigs/sockets_darwin.go b/pkg/machine/vmconfigs/sockets_darwin.go index d7cba1b350..a28b10a3a1 100644 --- a/pkg/machine/vmconfigs/sockets_darwin.go +++ b/pkg/machine/vmconfigs/sockets_darwin.go @@ -15,3 +15,8 @@ func readySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, socketName := name + ".sock" return machineRuntimeDir.AppendToNewVMFile(socketName, &socketName) } + +func apiSocket(name string, socketDir *define.VMFile) (*define.VMFile, error) { + socketName := name + "-api.sock" + return socketDir.AppendToNewVMFile(socketName, &socketName) +} diff --git a/pkg/machine/volumes.go b/pkg/machine/volumes.go index b451e8c634..241361eb3a 100644 --- a/pkg/machine/volumes.go +++ b/pkg/machine/volumes.go @@ -1,11 +1,17 @@ package machine import ( - "strings" + "crypto/sha256" + "encoding/hex" "github.com/containers/podman/v5/pkg/machine/vmconfigs" ) +// NFSSELinuxContext is what is used by NFS mounts, which is allowed +// access by container_t. We need to fix the Fedora selinux policy +// to just allow access to virtiofs_t. 
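+// Until then, the QEMU virtiofs guest mounts (see MountVolumesToVM above) pass this value as a mount context= option so containers can access the shared files.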
+const NFSSELinuxContext = "system_u:object_r:nfs_t:s0" + type Volume interface { Kind() VolumeKind } @@ -29,14 +35,13 @@ func (v VirtIoFs) Kind() string { return string(VirtIOFsVk) } -// unitName is the fq path where /'s are replaced with -'s -func (v VirtIoFs) unitName() string { - // delete the leading - - unit := strings.ReplaceAll(v.Target, "/", "-") - if strings.HasPrefix(unit, "-") { - return unit[1:] - } - return unit +// generateTag generates a tag for VirtIOFs mounts. +// AppleHV requires tags to be 36 bytes or fewer. +// SHA256 the path, then truncate to 36 bytes +func (v VirtIoFs) generateTag() string { + sum := sha256.Sum256([]byte(v.Target)) + stringSum := hex.EncodeToString(sum[:]) + return stringSum[:36] } func (v VirtIoFs) ToMount() vmconfigs.Mount { @@ -58,7 +63,7 @@ func NewVirtIoFsMount(src, target string, readOnly bool) VirtIoFs { Source: src, Target: target, } - vfs.Tag = vfs.unitName() + vfs.Tag = vfs.generateTag() return vfs } diff --git a/pkg/machine/wsl/config.go b/pkg/machine/wsl/config.go index 4c7b61714a..8cb8725d70 100644 --- a/pkg/machine/wsl/config.go +++ b/pkg/machine/wsl/config.go @@ -150,10 +150,6 @@ func (p *WSLVirtualization) IsValidVMName(name string) (bool, error) { return false, nil } -func (p *WSLVirtualization) CheckExclusiveActiveVM() (bool, string, error) { - return false, "", nil -} - // RemoveAndCleanMachines removes all machine and cleans up any other files associated with podman machine func (p *WSLVirtualization) RemoveAndCleanMachines() error { var ( diff --git a/pkg/machine/wsl/declares.go b/pkg/machine/wsl/declares.go index a8a8179008..a1f13ae310 100644 --- a/pkg/machine/wsl/declares.go +++ b/pkg/machine/wsl/declares.go @@ -19,6 +19,7 @@ const registriesConf = `unqualified-search-registries=["docker.io"] const appendPort = `grep -q Port\ %d /etc/ssh/sshd_config || echo Port %d >> /etc/ssh/sshd_config` +//nolint:unused const changePort = `sed -E -i 's/^Port[[:space:]]+[0-9]+/Port %d/' /etc/ssh/sshd_config` const configServices = `ln -fs /usr/lib/systemd/system/sshd.service /etc/systemd/system/multi-user.target.wants/sshd.service @@ -174,43 +175,6 @@ SocketMode=0660 SocketGroup=wheel ` -const proxyConfigSetup = `#!/bin/bash - -SYSTEMD_CONF=/etc/systemd/system.conf.d/default-env.conf -ENVD_CONF=/etc/environment.d/default-env.conf -PROFILE_CONF=/etc/profile.d/default-env.sh - -IFS="|" -read proxies - -mkdir -p /etc/profile.d /etc/environment.d /etc/systemd/system.conf.d/ -rm -f $SYSTEMD_CONF -for proxy in $proxies; do - output+="$proxy " -done -echo "[Manager]" >> $SYSTEMD_CONF -echo -ne "DefaultEnvironment=" >> $SYSTEMD_CONF - -echo $output >> $SYSTEMD_CONF -rm -f $ENVD_CONF -for proxy in $proxies; do - echo "$proxy" >> $ENVD_CONF -done -rm -f $PROFILE_CONF -for proxy in $proxies; do - echo "export $proxy" >> $PROFILE_CONF -done -` - -const proxyConfigAttempt = `if [ -f /usr/local/bin/proxyinit ]; \ -then /usr/local/bin/proxyinit; \ -else exit 42; \ -fi` - -const clearProxySettings = `rm -f /etc/systemd/system.conf.d/default-env.conf \ - /etc/environment.d/default-env.conf \ - /etc/profile.d/default-env.sh` - const wslInstallError = `Could not %s. See previous output for any potential failure details. 
If you can not resolve the issue, and rerunning fails, try the "wsl --install" process outlined in the following article: @@ -240,7 +204,7 @@ http://docs.microsoft.com/en-us/windows/wsl/install\ const ( gvProxy = "gvproxy.exe" - winSShProxy = "win-sshproxy.exe" + winSSHProxy = "win-sshproxy.exe" pipePrefix = "npipe:////./pipe/" globalPipe = "docker_engine" userModeDist = "podman-net-usermode" diff --git a/pkg/machine/wsl/fedora.go b/pkg/machine/wsl/fedora.go index ebd95a7cb3..1b88716cf1 100644 --- a/pkg/machine/wsl/fedora.go +++ b/pkg/machine/wsl/fedora.go @@ -12,12 +12,12 @@ import ( "time" "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/version" "github.com/sirupsen/logrus" ) const ( - githubX86ReleaseURL = "https://github.com/containers/podman-wsl-fedora/releases/latest/download/rootfs.tar.xz" - githubArmReleaseURL = "https://github.com/containers/podman-wsl-fedora-arm/releases/latest/download/rootfs.tar.xz" + latestReleaseURL = "https://github.com/containers/podman-machine-wsl-os/releases/latest/download" ) type FedoraDownload struct { @@ -46,23 +46,20 @@ func (f FedoraDownload) CleanCache() error { } func GetFedoraDownloadForWSL() (*url.URL, string, string, int64, error) { - var releaseURL string arch := machine.DetermineMachineArch() - switch arch { - case "arm64": - releaseURL = githubArmReleaseURL - case "amd64": - releaseURL = githubX86ReleaseURL - default: + if arch != "amd64" && arch != "arm64" { return nil, "", "", -1, fmt.Errorf("CPU architecture %q is not supported", arch) } - downloadURL, err := url.Parse(releaseURL) + releaseURL, err := url.Parse(latestReleaseURL) if err != nil { - return nil, "", "", -1, fmt.Errorf("invalid URL generated from discovered Fedora file: %s: %w", releaseURL, err) + return nil, "", "", -1, fmt.Errorf("could not parse release URL: %s: %w", releaseURL, err) } - resp, err := http.Head(releaseURL) + rootFs := fmt.Sprintf("%d.%d-rootfs-%s.tar.zst", version.Version.Major, version.Version.Minor, arch) + rootFsURL := appendToURL(releaseURL, rootFs) + + resp, err := http.Head(rootFsURL.String()) if err != nil { return nil, "", "", -1, fmt.Errorf("head request failed: %s: %w", releaseURL, err) } @@ -70,12 +67,10 @@ func GetFedoraDownloadForWSL() (*url.URL, string, string, int64, error) { contentLen := resp.ContentLength if resp.StatusCode != http.StatusOK { - return nil, "", "", -1, fmt.Errorf("head request failed: %s: %w", releaseURL, err) + return nil, "", "", -1, fmt.Errorf("head request failed: %s: %w", rootFsURL, err) } - verURL := *downloadURL - verURL.Path = path.Join(path.Dir(downloadURL.Path), "version") - + verURL := appendToURL(releaseURL, "version") resp, err = http.Get(verURL.String()) if err != nil { return nil, "", "", -1, fmt.Errorf("get request failed: %s: %w", verURL.String(), err) @@ -83,12 +78,18 @@ func GetFedoraDownloadForWSL() (*url.URL, string, string, int64, error) { defer func() { if err := resp.Body.Close(); err != nil { - logrus.Errorf("error closing http boddy: %q", err) + logrus.Errorf("error closing http body: %q", err) } }() b, err := io.ReadAll(&io.LimitedReader{R: resp.Body, N: 1024}) if err != nil { return nil, "", "", -1, fmt.Errorf("failed reading: %s: %w", verURL.String(), err) } - return downloadURL, strings.TrimSpace(string(b)), arch, contentLen, nil + return rootFsURL, strings.TrimSpace(string(b)), arch, contentLen, nil +} + +func appendToURL(url *url.URL, elem string) *url.URL { + newURL := *url + newURL.Path = path.Join(url.Path, elem) + return &newURL } diff --git 
a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go index e9196f28a9..38c1a91261 100644 --- a/pkg/machine/wsl/machine.go +++ b/pkg/machine/wsl/machine.go @@ -4,6 +4,7 @@ package wsl import ( "bufio" + "bytes" "errors" "fmt" "io" @@ -15,8 +16,10 @@ import ( "time" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/strongunits" "github.com/containers/podman/v5/pkg/machine" "github.com/containers/podman/v5/pkg/machine/define" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/ignition" "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/pkg/machine/wsl/wutil" @@ -40,12 +43,14 @@ func (e *ExitCodeError) Error() string { return fmt.Sprintf("Process failed with exit code: %d", e.code) } +//nolint:unused func getConfigPath(name string) (string, error) { return getConfigPathExt(name, "json") } +//nolint:unused func getConfigPathExt(name string, extension string) (string, error) { - vmConfigDir, err := machine.GetConfDir(vmtype) + vmConfigDir, err := env.GetConfDir(vmtype) if err != nil { return "", err } @@ -56,7 +61,7 @@ func getConfigPathExt(name string, extension string) (string, error) { // TODO like provisionWSL, i think this needs to be pushed to use common // paths and types where possible func unprovisionWSL(mc *vmconfigs.MachineConfig) error { - dist := machine.ToDist(mc.Name) + dist := env.WithPodmanPrefix(mc.Name) if err := terminateDist(dist); err != nil { logrus.Error(err) } @@ -64,7 +69,7 @@ func unprovisionWSL(mc *vmconfigs.MachineConfig) error { logrus.Error(err) } - vmDataDir, err := machine.GetDataDir(vmtype) + vmDataDir, err := env.GetDataDir(vmtype) if err != nil { return err } @@ -77,7 +82,7 @@ func unprovisionWSL(mc *vmconfigs.MachineConfig) error { // we should push this stuff be more common (dir names, etc) and also use // typed things where possible like vmfiles func provisionWSLDist(name string, imagePath string, prompt string) (string, error) { - vmDataDir, err := machine.GetDataDir(vmtype) + vmDataDir, err := env.GetDataDir(vmtype) if err != nil { return "", err } @@ -88,7 +93,7 @@ func provisionWSLDist(name string, imagePath string, prompt string) (string, err return "", fmt.Errorf("could not create wsldist directory: %w", err) } - dist := machine.ToDist(name) + dist := env.WithPodmanPrefix(name) fmt.Println(prompt) if err = runCmdPassThrough(wutil.FindWSL(), "--import", dist, distTarget, imagePath, "--version", "2"); err != nil { return "", fmt.Errorf("the WSL import of guest OS failed: %w", err) @@ -99,6 +104,14 @@ func provisionWSLDist(name string, imagePath string, prompt string) (string, err return "", fmt.Errorf("package permissions restore of shadow-utils on guest OS failed: %w", err) } + if err = wslInvoke(dist, "mkdir", "-p", "/usr/local/bin"); err != nil { + return "", fmt.Errorf("could not create /usr/local/bin: %w", err) + } + + if err = wslInvoke(dist, "ln", "-f", "-s", gvForwarderPath, "/usr/local/bin/vm"); err != nil { + return "", fmt.Errorf("could not setup compatibility link: %w", err) + } + return dist, nil } @@ -242,41 +255,6 @@ func setupPodmanDockerSock(dist string, rootful bool) error { return nil } -func configureProxy(dist string, useProxy bool, quiet bool) error { - if !useProxy { - _ = wslInvoke(dist, "sh", "-c", clearProxySettings) - return nil - } - var content string - for i, key := range config.ProxyEnv { - if value, _ := os.LookupEnv(key); len(value) > 0 { - var suffix string - if i < (len(config.ProxyEnv) - 1) { - suffix = 
"|" - } - content = fmt.Sprintf("%s%s=\"%s\"%s", content, key, value, suffix) - } - } - - if err := wslPipe(content, dist, "sh", "-c", proxyConfigAttempt); err != nil { - const failMessage = "Failure creating proxy configuration" - if exitErr, isExit := err.(*exec.ExitError); isExit && exitErr.ExitCode() != 42 { - return fmt.Errorf("%v: %w", failMessage, err) - } - if !quiet { - fmt.Println("Installing proxy support") - } - _ = wslPipe(proxyConfigSetup, dist, "sh", "-c", - "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit") - - if err = wslPipe(content, dist, "/usr/local/bin/proxyinit"); err != nil { - return fmt.Errorf("%v: %w", failMessage, err) - } - } - - return nil -} - func enableUserLinger(mc *vmconfigs.MachineConfig, dist string) error { lingerCmd := "mkdir -p /var/lib/systemd/linger; touch /var/lib/systemd/linger/" + mc.SSH.RemoteUsername if err := wslInvoke(dist, "sh", "-c", lingerCmd); err != nil { @@ -315,11 +293,6 @@ func installScripts(dist string) error { return fmt.Errorf("could not create bootstrap script for guest OS: %w", err) } - if err := wslPipe(proxyConfigSetup, dist, "sh", "-c", - "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit"); err != nil { - return fmt.Errorf("could not create proxyinit script for guest OS: %w", err) - } - return nil } @@ -336,7 +309,7 @@ func checkAndInstallWSL(reExec bool) (bool, error) { return true, nil } - admin := hasAdminRights() + admin := HasAdminRights() if !IsWSLFeatureEnabled() { return false, attemptFeatureInstall(reExec, admin) @@ -396,7 +369,9 @@ func attemptFeatureInstall(reExec, admin bool) error { } func launchElevate(operation string) error { - truncateElevatedOutputFile() + if err := truncateElevatedOutputFile(); err != nil { + return err + } err := relaunchElevatedWait() if err != nil { if eerr, ok := err.(*ExitCodeError); ok { @@ -565,6 +540,7 @@ func wslPipe(input string, dist string, arg ...string) error { return pipeCmdPassThrough(wutil.FindWSL(), input, newArgs...) 
} +//nolint:unused func wslCreateKeys(identityPath string, dist string) (string, error) { return machine.CreateSSHKeysPrefix(identityPath, true, true, wutil.FindWSL(), "-u", "root", "-d", dist) } @@ -575,7 +551,10 @@ func runCmdPassThrough(name string, arg ...string) error { cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - return cmd.Run() + if err := cmd.Run(); err != nil { + return fmt.Errorf("command %s %v failed: %w", name, arg, err) + } + return nil } func runCmdPassThroughTee(out io.Writer, name string, arg ...string) error { @@ -587,7 +566,10 @@ func runCmdPassThroughTee(out io.Writer, name string, arg ...string) error { cmd.Stdin = os.Stdin cmd.Stdout = io.MultiWriter(os.Stdout, out) cmd.Stderr = io.MultiWriter(os.Stderr, out) - return cmd.Run() + if err := cmd.Run(); err != nil { + return fmt.Errorf("command %s %v failed: %w", name, arg, err) + } + return nil } func pipeCmdPassThrough(name string, input string, arg ...string) error { @@ -596,7 +578,10 @@ func pipeCmdPassThrough(name string, input string, arg ...string) error { cmd.Stdin = strings.NewReader(input) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - return cmd.Run() + if err := cmd.Run(); err != nil { + return fmt.Errorf("command %s %v failed: %w", name, arg, err) + } + return nil } func setupWslProxyEnv() (hasProxy bool) { @@ -619,8 +604,9 @@ func setupWslProxyEnv() (hasProxy bool) { return } +//nolint:unused func obtainGlobalConfigLock() (*fileLock, error) { - lockDir, err := machine.GetGlobalDataDir() + lockDir, err := env.GetGlobalDataDir() if err != nil { return nil, err } @@ -662,8 +648,10 @@ func getAllWSLDistros(running bool) (map[string]struct{}, error) { if err != nil { return nil, err } + stderr := &bytes.Buffer{} + cmd.Stderr = stderr if err = cmd.Start(); err != nil { - return nil, err + return nil, fmt.Errorf("failed to start command %s %v: %w", cmd.Path, args, err) } all := make(map[string]struct{}) @@ -675,7 +663,10 @@ func getAllWSLDistros(running bool) (map[string]struct{}, error) { } } - _ = cmd.Wait() + err = cmd.Wait() + if err != nil { + return nil, fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, args, err, strings.TrimSpace(stderr.String())) + } return all, nil } @@ -687,6 +678,8 @@ func isSystemdRunning(dist string) (bool, error) { if err != nil { return false, err } + stderr := &bytes.Buffer{} + cmd.Stderr = stderr if err = cmd.Start(); err != nil { return false, err } @@ -700,23 +693,34 @@ func isSystemdRunning(dist string) (bool, error) { } } - _ = cmd.Wait() + err = cmd.Wait() + if err != nil { + return false, fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, cmd.Args, err, strings.TrimSpace(stderr.String())) + } return result, nil } func terminateDist(dist string) error { cmd := exec.Command(wutil.FindWSL(), "--terminate", dist) - return cmd.Run() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, cmd.Args, err, strings.TrimSpace(string(out))) + } + return nil } func unregisterDist(dist string) error { cmd := exec.Command(wutil.FindWSL(), "--unregister", dist) - return cmd.Run() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, cmd.Args, err, strings.TrimSpace(string(out))) + } + return nil } func isRunning(name string) (bool, error) { - dist := machine.ToDist(name) + dist := env.WithPodmanPrefix(name) wsl, err := isWSLRunning(dist) if err != nil { return false, err @@ -734,8 +738,9 @@ func isRunning(name string) (bool, error) { 
return sysd, err } -func getDiskSize(name string) uint64 { - vmDataDir, err := machine.GetDataDir(vmtype) +//nolint:unused +func getDiskSize(name string) strongunits.GiB { + vmDataDir, err := env.GetDataDir(vmtype) if err != nil { return 0 } @@ -745,11 +750,12 @@ func getDiskSize(name string) uint64 { if err != nil { return 0 } - return uint64(info.Size()) + return strongunits.ToGiB(strongunits.B(info.Size())) } +//nolint:unused func getCPUs(name string) (uint64, error) { - dist := machine.ToDist(name) + dist := env.WithPodmanPrefix(name) if run, _ := isWSLRunning(dist); !run { return 0, nil } @@ -758,6 +764,8 @@ func getCPUs(name string) (uint64, error) { if err != nil { return 0, err } + stderr := &bytes.Buffer{} + cmd.Stderr = stderr if err = cmd.Start(); err != nil { return 0, err } @@ -766,14 +774,18 @@ func getCPUs(name string) (uint64, error) { for scanner.Scan() { result = scanner.Text() } - _ = cmd.Wait() + err = cmd.Wait() + if err != nil { + return 0, fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, cmd.Args, err, strings.TrimSpace(strings.TrimSpace(stderr.String()))) + } ret, err := strconv.Atoi(result) return uint64(ret), err } -func getMem(name string) (uint64, error) { - dist := machine.ToDist(name) +//nolint:unused +func getMem(name string) (strongunits.MiB, error) { + dist := env.WithPodmanPrefix(name) if run, _ := isWSLRunning(dist); !run { return 0, nil } @@ -782,6 +794,8 @@ func getMem(name string) (uint64, error) { if err != nil { return 0, err } + stderr := &bytes.Buffer{} + cmd.Stderr = stderr if err = cmd.Start(); err != nil { return 0, err } @@ -791,23 +805,28 @@ func getMem(name string) (uint64, error) { t, a int ) for scanner.Scan() { + // fields are in kB so div to mb fields := strings.Fields(scanner.Text()) if strings.HasPrefix(fields[0], "MemTotal") && len(fields) >= 2 { t, err = strconv.Atoi(fields[1]) - total = uint64(t) * 1024 + total = uint64(t) / 1024 } else if strings.HasPrefix(fields[0], "MemAvailable") && len(fields) >= 2 { a, err = strconv.Atoi(fields[1]) - available = uint64(a) * 1024 + available = uint64(a) / 1024 } if err != nil { break } } - _ = cmd.Wait() + err = cmd.Wait() + if err != nil { + return 0, fmt.Errorf("command %s %v failed: %w (%s)", cmd.Path, cmd.Args, err, strings.TrimSpace(stderr.String())) + } - return total - available, err + return strongunits.MiB(total - available), err } +//nolint:unused func getResources(mc *vmconfigs.MachineConfig) (resources vmconfigs.ResourceConfig) { resources.CPUs, _ = getCPUs(mc.Name) resources.Memory, _ = getMem(mc.Name) diff --git a/pkg/machine/wsl/stubber.go b/pkg/machine/wsl/stubber.go index 1af182e1c0..3a79f095ca 100644 --- a/pkg/machine/wsl/stubber.go +++ b/pkg/machine/wsl/stubber.go @@ -3,6 +3,7 @@ package wsl import ( + "bytes" "errors" "fmt" "os" @@ -10,9 +11,12 @@ import ( "path/filepath" "strings" + "github.com/containers/podman/v5/pkg/machine/env" "github.com/containers/podman/v5/pkg/machine/ocipull" + "github.com/containers/podman/v5/pkg/machine/shim/diskpull" "github.com/containers/podman/v5/pkg/machine/stdpull" "github.com/containers/podman/v5/pkg/machine/wsl/wutil" + "github.com/containers/podman/v5/utils" gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" "github.com/containers/podman/v5/pkg/machine" @@ -31,7 +35,7 @@ func (w WSLStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConf err error ) // cleanup half-baked files if init fails at any point - callbackFuncs := machine.InitCleanup() + callbackFuncs := machine.CleanUp() defer 
callbackFuncs.CleanIfErr(&err) go callbackFuncs.CleanOnSignal() mc.WSLHypervisor = new(vmconfigs.WSLConfig) @@ -90,7 +94,7 @@ func (w WSLStubber) PrepareIgnition(_ *vmconfigs.MachineConfig, _ *ignition.Igni } func (w WSLStubber) Exists(name string) (bool, error) { - return isWSLExist(machine.ToDist(name)) + return isWSLExist(env.WithPodmanPrefix(name)) } func (w WSLStubber) MountType() vmconfigs.VolumeMountType { @@ -106,10 +110,10 @@ func (w WSLStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, // below if we wanted to hard error on the wsl unregister // of the vm wslRemoveFunc := func() error { - if err := runCmdPassThrough(wutil.FindWSL(), "--unregister", machine.ToDist(mc.Name)); err != nil { - logrus.Error(err) + if err := runCmdPassThrough(wutil.FindWSL(), "--unregister", env.WithPodmanPrefix(mc.Name)); err != nil { + return err } - return machine.ReleaseMachinePort(mc.SSH.Port) + return nil } return []string{}, wslRemoveFunc, nil @@ -120,9 +124,6 @@ func (w WSLStubber) RemoveAndCleanMachines(_ *define.MachineDirs) error { } func (w WSLStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.SetOptions) error { - mc.Lock() - defer mc.Unlock() - state, err := w.State(mc, false) if err != nil { return err @@ -158,9 +159,9 @@ func (w WSLStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, opts define.Se return errors.New("user-mode networking can only be changed when the machine is not running") } - dist := machine.ToDist(mc.Name) + dist := env.WithPodmanPrefix(mc.Name) if err := changeDistUserModeNetworking(dist, mc.SSH.RemoteUsername, mc.ImagePath.GetPath(), *opts.UserModeNetworking); err != nil { - return fmt.Errorf("failure changing state of user-mode networking setting", err) + return fmt.Errorf("failure changing state of user-mode networking setting: %w", err) } mc.WSLHypervisor.UserModeNetworking = *opts.UserModeNetworking @@ -204,22 +205,7 @@ func (w WSLStubber) PostStartNetworking(mc *vmconfigs.MachineConfig, noInfo bool } func (w WSLStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) { - useProxy := setupWslProxyEnv() - dist := machine.ToDist(mc.Name) - - // TODO Quiet is hard set to false: follow up - if err := configureProxy(dist, useProxy, false); err != nil { - return nil, nil, err - } - - // TODO The original code checked to see if the SSH port was actually open and re-assigned if it was - // we could consider this but it should be higher up the stack - // if !machine.IsLocalPortAvailable(v.Port) { - // logrus.Warnf("SSH port conflict detected, reassigning a new port") - // if err := v.reassignSshPort(); err != nil { - // return err - // } - // } + dist := env.WithPodmanPrefix(mc.Name) err := wslInvoke(dist, "/root/bootstrap") if err != nil { @@ -248,15 +234,12 @@ func (w WSLStubber) StopVM(mc *vmconfigs.MachineConfig, hardStop bool) error { var ( err error ) - mc.Lock() - defer mc.Unlock() - // recheck after lock if running, err := isRunning(mc.Name); !running { return err } - dist := machine.ToDist(mc.Name) + dist := env.WithPodmanPrefix(mc.Name) // Stop user-mode networking if enabled if err := stopUserModeNetworking(mc); err != nil { @@ -269,17 +252,21 @@ func (w WSLStubber) StopVM(mc *vmconfigs.MachineConfig, hardStop bool) error { cmd := exec.Command(wutil.FindWSL(), "-u", "root", "-d", dist, "sh") cmd.Stdin = strings.NewReader(waitTerm) + out := &bytes.Buffer{} + cmd.Stderr = out + cmd.Stdout = out + if err = cmd.Start(); err != nil { return fmt.Errorf("executing wait command: %w", err) } exitCmd := 
exec.Command(wutil.FindWSL(), "-u", "root", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0") if err = exitCmd.Run(); err != nil { - return fmt.Errorf("stopping sysd: %w", err) + return fmt.Errorf("stopping systemd: %w", err) } if err = cmd.Wait(); err != nil { - return err + logrus.Warnf("Failed to wait for systemd to exit: (%s)", strings.TrimSpace(out.String())) } return terminateDist(dist) @@ -289,15 +276,29 @@ func (w WSLStubber) StopHostNetworking(mc *vmconfigs.MachineConfig, vmType defin return stopUserModeNetworking(mc) } +func (w WSLStubber) UpdateSSHPort(mc *vmconfigs.MachineConfig, port int) error { + dist := env.WithPodmanPrefix(mc.Name) + + if err := wslInvoke(dist, "sh", "-c", fmt.Sprintf(changePort, port)); err != nil { + return fmt.Errorf("could not change SSH port for guest OS: %w", err) + } + + return nil +} + func (w WSLStubber) VMType() define.VMType { return define.WSLVirt } -func (w WSLStubber) GetDisk(_ string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { +func (w WSLStubber) GetDisk(userInputPath string, dirs *define.MachineDirs, mc *vmconfigs.MachineConfig) error { var ( myDisk ocipull.Disker ) + if userInputPath != "" { + return diskpull.GetDisk(userInputPath, dirs, mc.ImagePath, w.VMType(), mc.Name) + } + // check github for the latest version of the WSL dist downloadURL, downloadVersion, _, _, err := GetFedoraDownloadForWSL() if err != nil { @@ -308,8 +309,7 @@ func (w WSLStubber) GetDisk(_ string, dirs *define.MachineDirs, mc *vmconfigs.Ma // i.e.v39.0.31-rootfs.tar.xz versionedBase := fmt.Sprintf("%s-%s", downloadVersion, filepath.Base(downloadURL.Path)) - // TODO we need a mechanism for "flushing" old cache files - cachedFile, err := dirs.DataDir.AppendToNewVMFile(versionedBase, nil) + cachedFile, err := dirs.ImageCacheDir.AppendToNewVMFile(versionedBase, nil) if err != nil { return err } @@ -318,14 +318,36 @@ func (w WSLStubber) GetDisk(_ string, dirs *define.MachineDirs, mc *vmconfigs.Ma if _, err = os.Stat(cachedFile.GetPath()); err == nil { logrus.Debugf("%q already exists locally", cachedFile.GetPath()) myDisk, err = stdpull.NewStdDiskPull(cachedFile.GetPath(), mc.ImagePath) + if err != nil { + return err + } } else { - // no cached file - myDisk, err = stdpull.NewDiskFromURL(downloadURL.String(), mc.ImagePath, dirs.DataDir, &versionedBase) - } - if err != nil { - return err + files, err := os.ReadDir(dirs.ImageCacheDir.GetPath()) + if err != nil { + logrus.Warn("failed to clean machine image cache: ", err) + } else { + defer func() { + for _, file := range files { + path := filepath.Join(dirs.ImageCacheDir.GetPath(), file.Name()) + logrus.Debugf("cleaning cached image: %s", path) + err := utils.GuardedRemoveAll(path) + if err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warn("failed to clean machine image cache: ", err) + } + } + }() + } + + myDisk, err = stdpull.NewDiskFromURL(downloadURL.String(), mc.ImagePath, dirs.ImageCacheDir, &versionedBase, true) + if err != nil { + return err + } } // up until now, nothing has really happened // pull if needed and decompress to image location return myDisk.Get() } + +func (w WSLStubber) GetRosetta(mc *vmconfigs.MachineConfig) (bool, error) { + return false, nil +} diff --git a/pkg/machine/wsl/usermodenet.go b/pkg/machine/wsl/usermodenet.go index 1c7ef4eeda..0f09874e0f 100644 --- a/pkg/machine/wsl/usermodenet.go +++ b/pkg/machine/wsl/usermodenet.go @@ -5,17 +5,20 @@ package wsl import ( "errors" "fmt" - "github.com/containers/podman/v5/pkg/machine/vmconfigs" "os" "os/exec" 
"path/filepath" "github.com/containers/podman/v5/pkg/machine" + "github.com/containers/podman/v5/pkg/machine/env" + "github.com/containers/podman/v5/pkg/machine/vmconfigs" "github.com/containers/podman/v5/pkg/machine/wsl/wutil" "github.com/containers/podman/v5/pkg/specgen" "github.com/sirupsen/logrus" ) +const gvForwarderPath = "/usr/libexec/podman/gvforwarder" + const startUserModeNet = ` set -e STATE=/mnt/wsl/podman-usermodenet @@ -29,10 +32,10 @@ fi if [[ ! $ROUTE =~ default\ via ]]; then exit 3 fi -nohup /usr/local/bin/vm -iface podman-usermode -stop-if-exist ignore -url "stdio:$GVPROXY?listen-stdio=accept" > /var/log/vm.log 2> /var/log/vm.err < /dev/null & +nohup $GVFORWARDER -iface podman-usermode -stop-if-exist ignore -url "stdio:$GVPROXY?listen-stdio=accept" > /var/log/vm.log 2> /var/log/vm.err < /dev/null & echo $! > $STATE/vm.pid sleep 1 -ps -eo args | grep -q -m1 ^/usr/local/bin/vm || exit 42 +ps -eo args | grep -q -m1 ^$GVFORWARDER || exit 42 ` const stopUserModeNet = ` @@ -84,7 +87,9 @@ func startUserModeNetworking(mc *vmconfigs.MachineConfig) error { if err != nil { return err } - defer flock.unlock() + defer func() { + _ = flock.unlock() + }() running, err := isWSLRunning(userModeDist) if err != nil { @@ -99,7 +104,7 @@ func startUserModeNetworking(mc *vmconfigs.MachineConfig) error { } } - if err := createUserModeResolvConf(machine.ToDist(mc.Name)); err != nil { + if err := createUserModeResolvConf(env.WithPodmanPrefix(mc.Name)); err != nil { return err } @@ -121,7 +126,9 @@ func stopUserModeNetworking(mc *vmconfigs.MachineConfig) error { if err != nil { return err } - defer flock.unlock() + defer func() { + _ = flock.unlock() + }() err = removeUserModeNetEntry(mc.Name) if err != nil { @@ -157,7 +164,8 @@ func stopUserModeNetworking(mc *vmconfigs.MachineConfig) error { } func isGvProxyVMRunning() bool { - return wslInvoke(userModeDist, "bash", "-c", "ps -eo args | grep -q -m1 ^/usr/local/bin/vm || exit 42") == nil + cmd := fmt.Sprintf("ps -eo args | grep -q -m1 ^%s || exit 42", gvForwarderPath) + return wslInvoke(userModeDist, "bash", "-c", cmd) == nil } func launchUserModeNetDist(exeFile string) error { @@ -168,7 +176,7 @@ func launchUserModeNetDist(exeFile string) error { return err } - cmdStr := fmt.Sprintf("GVPROXY=%q\n%s", exe, startUserModeNet) + cmdStr := fmt.Sprintf("GVPROXY=%q\nGVFORWARDER=%q\n%s", exe, gvForwarderPath, startUserModeNet) if err := wslPipe(cmdStr, userModeDist, "bash"); err != nil { _ = terminateDist(userModeDist) @@ -197,8 +205,19 @@ func installUserModeDist(dist string, imagePath string) error { return err } + if exists { + if err := wslInvoke(userModeDist, "test", "-f", gvForwarderPath); err != nil { + fmt.Println("Replacing old user-mode distribution...") + _ = terminateDist(userModeDist) + if err := unregisterDist(userModeDist); err != nil { + return err + } + exists = false + } + } + if !exists { - if err := wslInvoke(dist, "test", "-f", "/usr/local/bin/vm"); err != nil { + if err := wslInvoke(dist, "test", "-f", gvForwarderPath); err != nil { return fmt.Errorf("existing machine is too old, can't install user-mode networking dist until machine is reinstalled (using podman machine rm, then podman machine init)") } @@ -222,7 +241,7 @@ func createUserModeResolvConf(dist string) error { } func getUserModeNetDir() (string, error) { - vmDataDir, err := machine.GetDataDir(vmtype) + vmDataDir, err := env.GetDataDir(vmtype) if err != nil { return "", err } @@ -255,7 +274,7 @@ func addUserModeNetEntry(mc *vmconfigs.MachineConfig) error { return err } - 
path := filepath.Join(entriesDir, machine.ToDist(mc.Name)) + path := filepath.Join(entriesDir, env.WithPodmanPrefix(mc.Name)) file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("could not add user-mode networking registration: %w", err) @@ -270,7 +289,7 @@ func removeUserModeNetEntry(name string) error { return err } - path := filepath.Join(entriesDir, machine.ToDist(name)) + path := filepath.Join(entriesDir, env.WithPodmanPrefix(name)) return os.Remove(path) } @@ -322,7 +341,7 @@ func obtainUserModeNetLock() (*fileLock, error) { func changeDistUserModeNetworking(dist string, user string, image string, enable bool) error { // Only install if user-mode is being enabled and there was an image path passed if enable { - if len(image) <= 0 { + if len(image) == 0 { return errors.New("existing machine configuration is corrupt, no image is defined") } if err := installUserModeDist(dist, image); err != nil { diff --git a/pkg/machine/wsl/util_windows.go b/pkg/machine/wsl/util_windows.go index 3046dca855..d50f73b915 100644 --- a/pkg/machine/wsl/util_windows.go +++ b/pkg/machine/wsl/util_windows.go @@ -3,7 +3,9 @@ package wsl import ( + "bytes" "encoding/base64" + "encoding/binary" "errors" "fmt" "os" @@ -13,13 +15,14 @@ import ( "unicode/utf16" "unsafe" + "github.com/Microsoft/go-winio" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" ) -// nolint type SHELLEXECUTEINFO struct { cbSize uint32 fMask uint32 @@ -38,7 +41,6 @@ type SHELLEXECUTEINFO struct { hProcess syscall.Handle } -// nolint type Luid struct { lowPart uint32 highPart int32 @@ -54,19 +56,25 @@ type TokenPrivileges struct { privileges [1]LuidAndAttributes } -// nolint // Cleaner to refer to the official OS constant names, and consistent with syscall +// Cleaner to refer to the official OS constant names, and consistent with syscall +// Ref: https://learn.microsoft.com/en-us/windows/win32/api/shellapi/ns-shellapi-shellexecuteinfow#members const ( - SEE_MASK_NOCLOSEPROCESS = 0x40 - EWX_FORCEIFHUNG = 0x10 - EWX_REBOOT = 0x02 - EWX_RESTARTAPPS = 0x40 - SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 - SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 - SHTDN_REASON_FLAG_PLANNED = 0x80000000 - TOKEN_ADJUST_PRIVILEGES = 0x0020 - TOKEN_QUERY = 0x0008 - SE_PRIVILEGE_ENABLED = 0x00000002 - SE_ERR_ACCESSDENIED = 0x05 + //nolint:stylecheck + SEE_MASK_NOCLOSEPROCESS = 0x40 + //nolint:stylecheck + SE_ERR_ACCESSDENIED = 0x05 +) + +const ( + // ref: https://learn.microsoft.com/en-us/windows/win32/secauthz/privilege-constants#constants + rebootPrivilege = "SeShutdownPrivilege" + + // "Application: Installation (Planned)" A planned restart or shutdown to perform application installation. 
+ // ref: https://learn.microsoft.com/en-us/windows/win32/shutdown/system-shutdown-reason-codes + rebootReason = windows.SHTDN_REASON_MAJOR_APPLICATION | windows.SHTDN_REASON_MINOR_INSTALLATION | windows.SHTDN_REASON_FLAG_PLANNED + + // ref: https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-exitwindowsex#parameters + rebootFlags = windows.EWX_REBOOT | windows.EWX_RESTARTAPPS | windows.EWX_FORCEIFHUNG ) func winVersionAtLeast(major uint, minor uint, build uint) bool { @@ -87,7 +95,7 @@ func winVersionAtLeast(major uint, minor uint, build uint) bool { return true } -func hasAdminRights() bool { +func HasAdminRights() bool { var sid *windows.SID // See: https://coolaj86.com/articles/golang-and-windows-and-admins-oh-my/ @@ -101,7 +109,9 @@ func hasAdminRights() bool { logrus.Warnf("SID allocation error: %s", err) return false } - defer windows.FreeSid(sid) + defer func() { + _ = windows.FreeSid(sid) + }() // From MS docs: // "If TokenHandle is NULL, CheckTokenMembership uses the impersonation @@ -136,7 +146,7 @@ func relaunchElevatedWait() error { lpFile: uintptr(unsafe.Pointer(exe)), lpParameters: uintptr(unsafe.Pointer(arg)), lpDirectory: uintptr(unsafe.Pointer(cwd)), - nShow: 1, + nShow: syscall.SW_SHOWNORMAL, } info.cbSize = uint32(unsafe.Sizeof(*info)) procShellExecuteEx := shell32.NewProc("ShellExecuteExW") @@ -148,8 +158,10 @@ func relaunchElevatedWait() error { return wrapMaybef(err, "could not launch process, ShellEX Error = %d", info.hInstApp) } - handle := syscall.Handle(info.hProcess) - defer syscall.CloseHandle(handle) + handle := info.hProcess + defer func() { + _ = syscall.CloseHandle(handle) + }() w, err := syscall.WaitForSingleObject(handle, syscall.INFINITE) switch w { @@ -158,7 +170,7 @@ func relaunchElevatedWait() error { case syscall.WAIT_FAILED: return fmt.Errorf("could not wait for process, failed: %w", err) default: - return errors.New("could not wait for process, unknown error") + return fmt.Errorf("could not wait for process, unknown error. event: %X, err: %v", w, err) } var code uint32 if err := syscall.GetExitCodeProcess(handle, &code); err != nil { @@ -212,7 +224,7 @@ func reboot() error { } command := fmt.Sprintf(pShellLaunch, commFile) - if _, err := os.Lstat(filepath.Join(os.Getenv(localAppData), wtLocation)); err == nil { + if err := fileutils.Lexists(filepath.Join(os.Getenv(localAppData), wtLocation)); err == nil { wtCommand := wtPrefix + command // RunOnce is limited to 260 chars (supposedly no longer in Builds >= 19489) // For now fallback in cases of long usernames (>89 chars) @@ -221,14 +233,6 @@ func reboot() error { } } - if err := addRunOnceRegistryEntry(command); err != nil { - return err - } - - if err := obtainShutdownPrivilege(); err != nil { - return err - } - message := "To continue the process of enabling WSL, the system needs to reboot. " + "Alternatively, you can cancel and reboot manually\n\n" + "After rebooting, please wait a minute or two for podman machine to relaunch and continue installing." 
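The hunk that follows drops the hand-rolled OpenProcessToken / LookupPrivilegeValue / AdjustTokenPrivileges plumbing in favour of winio.RunWithPrivilege combined with windows.ExitWindowsEx. Below is a minimal, self-contained sketch of that pattern, not part of the patch itself: the package name and requestReboot are invented for illustration, and the constant values simply mirror the const block added above.

//go:build windows

package wslutil

import (
	"fmt"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

// Values mirroring the constants introduced in util_windows.go.
const rebootPrivilege = "SeShutdownPrivilege"

const (
	rebootReason = windows.SHTDN_REASON_MAJOR_APPLICATION | windows.SHTDN_REASON_MINOR_INSTALLATION | windows.SHTDN_REASON_FLAG_PLANNED
	rebootFlags  = windows.EWX_REBOOT | windows.EWX_RESTARTAPPS | windows.EWX_FORCEIFHUNG
)

// requestReboot asks Windows for a planned reboot. RunWithPrivilege enables
// the named privilege on the calling thread's token only for the duration of
// the callback and reverts it afterwards, so none of the manual token
// open/adjust/close steps removed in the next hunk are needed here.
func requestReboot() error {
	return winio.RunWithPrivilege(rebootPrivilege, func() error {
		if err := windows.ExitWindowsEx(rebootFlags, rebootReason); err != nil {
			return fmt.Errorf("ExitWindowsEx failed: %w", err)
		}
		return nil
	})
}

The design point is that the privilege lives only as long as the callback, which is why the removed helper obtainShutdownPrivilege and its token handle bookkeeping disappear entirely.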
@@ -239,41 +243,17 @@ func reboot() error { return nil } - user32 := syscall.NewLazyDLL("user32") - procExit := user32.NewProc("ExitWindowsEx") - if ret, _, err := procExit.Call(EWX_REBOOT|EWX_RESTARTAPPS|EWX_FORCEIFHUNG, - SHTDN_REASON_MAJOR_APPLICATION|SHTDN_REASON_MINOR_INSTALLATION|SHTDN_REASON_FLAG_PLANNED); ret != 1 { - return fmt.Errorf("reboot failed: %w", err) - } - - return nil -} - -func obtainShutdownPrivilege() error { - const SeShutdownName = "SeShutdownPrivilege" - - advapi32 := syscall.NewLazyDLL("advapi32") - OpenProcessToken := advapi32.NewProc("OpenProcessToken") - LookupPrivilegeValue := advapi32.NewProc("LookupPrivilegeValueW") - AdjustTokenPrivileges := advapi32.NewProc("AdjustTokenPrivileges") - - proc, _ := syscall.GetCurrentProcess() - - var hToken uintptr - if ret, _, err := OpenProcessToken.Call(uintptr(proc), TOKEN_ADJUST_PRIVILEGES|TOKEN_QUERY, uintptr(unsafe.Pointer(&hToken))); ret != 1 { - return fmt.Errorf("opening process token: %w", err) - } - - var privs TokenPrivileges - if ret, _, err := LookupPrivilegeValue.Call(uintptr(0), uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(SeShutdownName))), uintptr(unsafe.Pointer(&(privs.privileges[0].luid)))); ret != 1 { - return fmt.Errorf("looking up shutdown privilege: %w", err) + if err := addRunOnceRegistryEntry(command); err != nil { + return err } - privs.privilegeCount = 1 - privs.privileges[0].attributes = SE_PRIVILEGE_ENABLED - - if ret, _, err := AdjustTokenPrivileges.Call(hToken, 0, uintptr(unsafe.Pointer(&privs)), 0, uintptr(0), 0); ret != 1 { - return fmt.Errorf("enabling shutdown privilege on token: %w", err) + if err := winio.RunWithPrivilege(rebootPrivilege, func() error { + if err := windows.ExitWindowsEx(rebootFlags, rebootReason); err != nil { + return fmt.Errorf("execute ExitWindowsEx to reboot system failed: %w", err) + } + return nil + }); err != nil { + return fmt.Errorf("cannot reboot system: %w", err) } return nil @@ -296,30 +276,25 @@ func addRunOnceRegistryEntry(command string) error { func encodeUTF16Bytes(s string) []byte { u16 := utf16.Encode([]rune(s)) - u16le := make([]byte, len(u16)*2) - for i := 0; i < len(u16); i++ { - u16le[i<<1] = byte(u16[i]) - u16le[(i<<1)+1] = byte(u16[i] >> 8) + buf := new(bytes.Buffer) + for _, r := range u16 { + _ = binary.Write(buf, binary.LittleEndian, r) } - return u16le + return buf.Bytes() } func MessageBox(caption, title string, fail bool) int { - var format int + var format uint32 if fail { - format = 0x10 + format = windows.MB_ICONERROR } else { - format = 0x41 + format = windows.MB_OKCANCEL | windows.MB_ICONINFORMATION } - user32 := syscall.NewLazyDLL("user32.dll") captionPtr, _ := syscall.UTF16PtrFromString(caption) titlePtr, _ := syscall.UTF16PtrFromString(title) - ret, _, _ := user32.NewProc("MessageBoxW").Call( - uintptr(0), - uintptr(unsafe.Pointer(captionPtr)), - uintptr(unsafe.Pointer(titlePtr)), - uintptr(format)) + + ret, _ := windows.MessageBox(0, captionPtr, titlePtr, format) return int(ret) } diff --git a/pkg/machine/wsl/wutil/wutil.go b/pkg/machine/wsl/wutil/wutil.go index 81320be36c..bc07ffa2a9 100644 --- a/pkg/machine/wsl/wutil/wutil.go +++ b/pkg/machine/wsl/wutil/wutil.go @@ -4,6 +4,7 @@ package wutil import ( "bufio" + "fmt" "io" "os" "os/exec" @@ -12,6 +13,7 @@ import ( "sync" "syscall" + "github.com/containers/storage/pkg/fileutils" "golang.org/x/text/encoding/unicode" "golang.org/x/text/transform" ) @@ -42,7 +44,7 @@ func FindWSL() string { locs = append(locs, filepath.Join(root, "System32", "wsl.exe")) for _, loc := range locs { 
- if _, err := os.Stat(loc); err == nil { + if err := fileutils.Exists(loc); err == nil { wslPath = loc return } @@ -73,7 +75,10 @@ func SilentExec(command string, args ...string) error { cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: 0x08000000} cmd.Stdout = nil cmd.Stderr = nil - return cmd.Run() + if err := cmd.Run(); err != nil { + return fmt.Errorf("command %s %v failed: %w", command, args, err) + } + return nil } func SilentExecCmd(command string, args ...string) *exec.Cmd { diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go index a3bb9b9f90..243d238715 100644 --- a/pkg/ps/ps.go +++ b/pkg/ps/ps.go @@ -238,29 +238,30 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities } ps := entities.ListContainer{ - AutoRemove: ctr.AutoRemove(), - CIDFile: conConfig.Spec.Annotations[define.InspectAnnotationCIDFile], - Command: conConfig.Command, - Created: conConfig.CreatedTime, - ExitCode: exitCode, - Exited: exited, - ExitedAt: exitedTime.Unix(), - ID: conConfig.ID, - Image: conConfig.RootfsImageName, - ImageID: conConfig.RootfsImageID, - IsInfra: conConfig.IsInfra, - Labels: conConfig.Labels, - Mounts: ctr.UserVolumes(), - Names: []string{conConfig.Name}, - Networks: networks, - Pid: pid, - Pod: conConfig.Pod, - Ports: portMappings, - Restarts: restartCount, - Size: size, - StartedAt: startedTime.Unix(), - State: conState.String(), - Status: healthStatus, + AutoRemove: ctr.AutoRemove(), + CIDFile: conConfig.Spec.Annotations[define.InspectAnnotationCIDFile], + Command: conConfig.Command, + Created: conConfig.CreatedTime, + ExitCode: exitCode, + Exited: exited, + ExitedAt: exitedTime.Unix(), + ExposedPorts: conConfig.ExposedPorts, + ID: conConfig.ID, + Image: conConfig.RootfsImageName, + ImageID: conConfig.RootfsImageID, + IsInfra: conConfig.IsInfra, + Labels: conConfig.Labels, + Mounts: ctr.UserVolumes(), + Names: []string{conConfig.Name}, + Networks: networks, + Pid: pid, + Pod: conConfig.Pod, + Ports: portMappings, + Restarts: restartCount, + Size: size, + StartedAt: startedTime.Unix(), + State: conState.String(), + Status: healthStatus, } if opts.Pod && len(conConfig.Pod) > 0 { podName, err := rt.GetPodName(conConfig.Pod) diff --git a/pkg/rootless/rootless.go b/pkg/rootless/rootless.go index e1466e91aa..3d654b1f35 100644 --- a/pkg/rootless/rootless.go +++ b/pkg/rootless/rootless.go @@ -7,6 +7,7 @@ import ( "sort" "sync" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" "github.com/moby/sys/user" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -16,7 +17,7 @@ import ( // TryJoinFromFilePaths. If joining fails, it attempts to delete the specified // file. 
func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { - if _, err := os.Stat(pausePidPath); err != nil { + if err := fileutils.Exists(pausePidPath); err != nil { if errors.Is(err, os.ErrNotExist) { return false, -1, nil } diff --git a/pkg/rootless/rootless_freebsd.go b/pkg/rootless/rootless_freebsd.go index 2a459398b3..28c1a5e15d 100644 --- a/pkg/rootless/rootless_freebsd.go +++ b/pkg/rootless/rootless_freebsd.go @@ -57,11 +57,6 @@ func GetConfiguredMappings(quiet bool) ([]idtools.IDMap, []idtools.IDMap, error) return nil, nil, errors.New("this function is not supported on this os") } -// ReadMappingsProc returns the uid_map and gid_map -func ReadMappingsProc(path string) ([]idtools.IDMap, error) { - return nil, nil -} - // IsFdInherited checks whether the fd is opened and valid to use func IsFdInherited(fd int) bool { return int(C.is_fd_inherited(C.int(fd))) > 0 diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c index 66963660a7..6e4702946b 100644 --- a/pkg/rootless/rootless_linux.c +++ b/pkg/rootless/rootless_linux.c @@ -880,7 +880,7 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true); } - setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1); + setenv ("_CONTAINERS_USERNS_CONFIGURED", "done", 1); setenv ("_CONTAINERS_ROOTLESS_UID", uid, 1); setenv ("_CONTAINERS_ROOTLESS_GID", gid, 1); @@ -922,8 +922,8 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) _exit (EXIT_FAILURE); } - execvp (argv[0], argv); - fprintf (stderr, "failed to execvp %s: %m\n", argv[0]); + execvp ("/proc/self/exe", argv); + fprintf (stderr, "failed to reexec: %m\n"); _exit (EXIT_FAILURE); } @@ -1081,7 +1081,7 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true); } - setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1); + setenv ("_CONTAINERS_USERNS_CONFIGURED", "done", 1); setenv ("_CONTAINERS_ROOTLESS_UID", uid, 1); setenv ("_CONTAINERS_ROOTLESS_GID", gid, 1); @@ -1145,7 +1145,8 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re _exit (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } - execvp (argv[0], argv); + execvp ("/proc/self/exe", argv); + fprintf (stderr, "failed to reexec: %m\n"); _exit (EXIT_FAILURE); } diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go index 40f81301fb..61133ababa 100644 --- a/pkg/rootless/rootless_linux.go +++ b/pkg/rootless/rootless_linux.go @@ -3,11 +3,9 @@ package rootless import ( - "bufio" "bytes" "errors" "fmt" - "io" "os" "os/exec" gosignal "os/signal" @@ -22,6 +20,7 @@ import ( "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" @@ -514,40 +513,9 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st return false, 0, fmt.Errorf("could not find any running process: %w", unix.ESRCH) } -// ReadMappingsProc parses and returns the ID mappings at the specified path. 
-func ReadMappingsProc(path string) ([]idtools.IDMap, error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - - mappings := []idtools.IDMap{} - - buf := bufio.NewReader(file) - for { - line, _, err := buf.ReadLine() - if err != nil { - if err == io.EOF { - return mappings, nil - } - return nil, fmt.Errorf("cannot read line from %s: %w", path, err) - } - if line == nil { - return mappings, nil - } - - containerID, hostID, size := 0, 0, 0 - if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { - return nil, fmt.Errorf("cannot parse %s: %w", string(line), err) - } - mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) - } -} - -func matches(id int, configuredIDs []idtools.IDMap, currentIDs []idtools.IDMap) bool { +func matches(id int, configuredIDs []idtools.IDMap, currentIDs []specs.LinuxIDMapping) bool { // The first mapping is the host user, handle it separately. - if currentIDs[0].HostID != id || currentIDs[0].Size != 1 { + if currentIDs[0].HostID != uint32(id) || currentIDs[0].Size != 1 { return false } @@ -558,10 +526,10 @@ func matches(id int, configuredIDs []idtools.IDMap, currentIDs []idtools.IDMap) // It is fine to iterate sequentially as both slices are sorted. for i := range currentIDs { - if currentIDs[i].HostID != configuredIDs[i].HostID { + if currentIDs[i].HostID != uint32(configuredIDs[i].HostID) { return false } - if currentIDs[i].Size != configuredIDs[i].Size { + if currentIDs[i].Size != uint32(configuredIDs[i].Size) { return false } } @@ -581,7 +549,7 @@ func ConfigurationMatches() (bool, error) { return false, err } - currentUIDs, err := ReadMappingsProc("/proc/self/uid_map") + currentUIDs, currentGIDs, err := unshare.GetHostIDMappings("") if err != nil { return false, err } @@ -590,11 +558,6 @@ func ConfigurationMatches() (bool, error) { return false, err } - currentGIDs, err := ReadMappingsProc("/proc/self/gid_map") - if err != nil { - return false, err - } - return matches(GetRootlessGID(), gids, currentGIDs), nil } diff --git a/pkg/rootless/rootless_unsupported.go b/pkg/rootless/rootless_unsupported.go index 587fb4cb96..0d587644fd 100644 --- a/pkg/rootless/rootless_unsupported.go +++ b/pkg/rootless/rootless_unsupported.go @@ -60,11 +60,6 @@ func GetConfiguredMappings(quiet bool) ([]idtools.IDMap, []idtools.IDMap, error) return nil, nil, errors.New("this function is not supported on this os") } -// ReadMappingsProc returns the uid_map and gid_map -func ReadMappingsProc(path string) ([]idtools.IDMap, error) { - return nil, nil -} - // IsFdInherited checks whether the fd is opened and valid to use func IsFdInherited(fd int) bool { return false diff --git a/pkg/signal/signal_common.go b/pkg/signal/signal_common.go index a81d0461b5..1061058291 100644 --- a/pkg/signal/signal_common.go +++ b/pkg/signal/signal_common.go @@ -71,3 +71,11 @@ func ParseSysSignalToName(s syscall.Signal) (string, error) { } return "", fmt.Errorf("unknown syscall signal: %s", s) } + +func ToDockerFormat(s uint) string { + var signalStr, err = ParseSysSignalToName(syscall.Signal(s)) + if err != nil { + return strconv.FormatUint(uint64(s), 10) + } + return fmt.Sprintf("SIG%s", signalStr) +} diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go index f740719af4..251932590a 100644 --- a/pkg/specgen/container_validate.go +++ b/pkg/specgen/container_validate.go @@ -3,10 +3,10 @@ package specgen import ( "errors" "fmt" + "slices" "strings" 
"github.com/containers/podman/v5/libpod/define" - "golang.org/x/exp/slices" ) var ( diff --git a/pkg/specgen/generate/config_freebsd.go b/pkg/specgen/generate/config_freebsd.go index ec5c8622dd..bfbbdd93ed 100644 --- a/pkg/specgen/generate/config_freebsd.go +++ b/pkg/specgen/generate/config_freebsd.go @@ -18,14 +18,16 @@ import ( // DevicesFromPath computes a list of devices func DevicesFromPath(g *generate.Generator, devicePath string) error { if isCDIDevice(devicePath) { - registry := cdi.GetRegistry( + registry, err := cdi.NewCache( cdi.WithAutoRefresh(false), ) + if err != nil { + return fmt.Errorf("creating CDI registry: %w", err) + } if err := registry.Refresh(); err != nil { logrus.Debugf("The following error was triggered when refreshing the CDI registry: %v", err) } - _, err := registry.InjectDevices(g.Config, devicePath) - if err != nil { + if _, err = registry.InjectDevices(g.Config, devicePath); err != nil { return fmt.Errorf("setting up CDI devices: %w", err) } return nil diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go index 4d62c93e01..ddca2b151c 100644 --- a/pkg/specgen/generate/config_linux.go +++ b/pkg/specgen/generate/config_linux.go @@ -14,6 +14,7 @@ import ( "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -25,14 +26,16 @@ import ( // DevicesFromPath computes a list of devices func DevicesFromPath(g *generate.Generator, devicePath string) error { if isCDIDevice(devicePath) { - registry := cdi.GetRegistry( + registry, err := cdi.NewCache( cdi.WithAutoRefresh(false), ) + if err != nil { + return fmt.Errorf("creating CDI registry: %w", err) + } if err := registry.Refresh(); err != nil { logrus.Debugf("The following error was triggered when refreshing the CDI registry: %v", err) } - _, err := registry.InjectDevices(g.Config, devicePath) - if err != nil { + if _, err := registry.InjectDevices(g.Config, devicePath); err != nil { return fmt.Errorf("setting up CDI devices: %w", err) } return nil @@ -131,7 +134,7 @@ func addDevice(g *generate.Generator, device string) error { return fmt.Errorf("%s is not a valid device: %w", src, err) } if rootless.IsRootless() { - if _, err := os.Stat(src); err != nil { + if err := fileutils.Exists(src); err != nil { return err } perm := "ro" diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go index db4f8cdb60..8d4029114b 100644 --- a/pkg/specgen/generate/container_create.go +++ b/pkg/specgen/generate/container_create.go @@ -501,6 +501,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l Dest: v.Destination, Source: v.Source, ReadWrite: v.ReadWrite, + SubPath: v.SubPath, }) } options = append(options, libpod.WithImageVolumes(vols)) @@ -619,7 +620,12 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l } restartPolicy = s.RestartPolicy } - options = append(options, libpod.WithRestartRetries(retries), libpod.WithRestartPolicy(restartPolicy)) + if restartPolicy != "" { + options = append(options, libpod.WithRestartPolicy(restartPolicy)) + } + if retries != 0 { + options = append(options, libpod.WithRestartRetries(retries)) + } healthCheckSet := false if s.ContainerHealthCheckConfig.HealthConfig != nil { diff --git a/pkg/specgen/generate/kube/kube.go 
b/pkg/specgen/generate/kube/kube.go index 25afd00f6d..1328323ceb 100644 --- a/pkg/specgen/generate/kube/kube.go +++ b/pkg/specgen/generate/kube/kube.go @@ -12,6 +12,7 @@ import ( "os" "regexp" "runtime" + "slices" "strconv" "strings" "time" @@ -37,7 +38,6 @@ import ( "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "sigs.k8s.io/yaml" ) @@ -142,6 +142,8 @@ type CtrSpecGenOptions struct { Volumes map[string]*KubeVolume // VolumesFrom for all containers VolumesFrom []string + // Image Volumes for this container + ImageVolumes []*specgen.ImageVolume // PodID of the parent pod PodID string // PodName of the parent pod @@ -223,6 +225,8 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener Driver: opts.LogDriver, } + s.ImageVolumes = opts.ImageVolumes + s.LogConfiguration.Options = make(map[string]string) for _, o := range opts.LogOptions { opt, val, hasVal := strings.Cut(o, "=") @@ -731,7 +735,7 @@ func setupLivenessProbe(s *specgen.SpecGenerator, containerYAML v1.Container, re return err } // if restart policy is in place, ensure the health check enforces it - if restartPolicy == "always" || restartPolicy == "onfailure" { + if restartPolicy == define.RestartPolicyAlways || restartPolicy == define.RestartPolicyOnFailure { s.HealthCheckOnFailureAction = define.HealthCheckOnFailureActionRestart } return nil @@ -763,7 +767,7 @@ func setupStartupProbe(s *specgen.SpecGenerator, containerYAML v1.Container, res Successes: int(containerYAML.StartupProbe.SuccessThreshold), } // if restart policy is in place, ensure the health check enforces it - if restartPolicy == "always" || restartPolicy == "onfailure" { + if restartPolicy == define.RestartPolicyAlways || restartPolicy == define.RestartPolicyOnFailure { s.HealthCheckOnFailureAction = define.HealthCheckOnFailureActionRestart } return nil diff --git a/pkg/specgen/generate/kube/play_test.go b/pkg/specgen/generate/kube/play_test.go index 88580f5b29..f16d3343da 100644 --- a/pkg/specgen/generate/kube/play_test.go +++ b/pkg/specgen/generate/kube/play_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/containers/common/pkg/secrets" + "github.com/containers/podman/v5/libpod/define" v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource" v12 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1371,6 +1372,24 @@ func TestTCPLivenessProbe(t *testing.T) { "myservice.domain.com", "4000", }, + { + "TCPLivenessProbeNormalWithOnFailureRestartPolicy", + specgen.SpecGenerator{}, + v1.Container{ + LivenessProbe: &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(8080), + }, + }, + }, + }, + "on-failure", + true, + "127.0.0.1", + "8080", + }, } for _, test := range tests { @@ -1379,6 +1398,7 @@ func TestTCPLivenessProbe(t *testing.T) { err := setupLivenessProbe(&test.specGenerator, test.container, test.restartPolicy) assert.Equal(t, err == nil, test.succeed) if err == nil { + assert.Equal(t, int(test.specGenerator.ContainerHealthCheckConfig.HealthCheckOnFailureAction), define.HealthCheckOnFailureActionRestart) assert.Contains(t, test.specGenerator.ContainerHealthCheckConfig.HealthConfig.Test, test.expectedHost) assert.Contains(t, test.specGenerator.ContainerHealthCheckConfig.HealthConfig.Test, test.expectedPort) } diff --git a/pkg/specgen/generate/kube/volume.go 
b/pkg/specgen/generate/kube/volume.go index 7229b24715..f17ba962af 100644 --- a/pkg/specgen/generate/kube/volume.go +++ b/pkg/specgen/generate/kube/volume.go @@ -5,12 +5,14 @@ package kube import ( "errors" "fmt" + "io/fs" "os" "github.com/containers/common/pkg/parse" "github.com/containers/common/pkg/secrets" "github.com/containers/podman/v5/libpod" v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" @@ -69,7 +71,7 @@ func VolumeFromHostPath(hostPath *v1.HostPathVolumeSource, mountLabel string) (* return nil, fmt.Errorf("giving %s a label: %w", hostPath.Path, err) } case v1.HostPathFileOrCreate: - if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { + if err := fileutils.Exists(hostPath.Path); errors.Is(err, fs.ErrNotExist) { f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, kubeFilePermission) if err != nil { return nil, fmt.Errorf("creating HostPath: %w", err) diff --git a/pkg/specgen/generate/namespaces_linux.go b/pkg/specgen/generate/namespaces_linux.go index fb22b12b18..2c304f7b1b 100644 --- a/pkg/specgen/generate/namespaces_linux.go +++ b/pkg/specgen/generate/namespaces_linux.go @@ -9,6 +9,7 @@ import ( "github.com/containers/podman/v5/libpod" "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/sirupsen/logrus" @@ -18,7 +19,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt // PID switch s.PidNS.NSMode { case specgen.Path: - if _, err := os.Stat(s.PidNS.Value); err != nil { + if err := fileutils.Exists(s.PidNS.Value); err != nil { return fmt.Errorf("cannot find specified PID namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), s.PidNS.Value); err != nil { @@ -37,7 +38,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt // IPC switch s.IpcNS.NSMode { case specgen.Path: - if _, err := os.Stat(s.IpcNS.Value); err != nil { + if err := fileutils.Exists(s.IpcNS.Value); err != nil { return fmt.Errorf("cannot find specified IPC namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), s.IpcNS.Value); err != nil { @@ -56,7 +57,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt // UTS switch s.UtsNS.NSMode { case specgen.Path: - if _, err := os.Stat(s.UtsNS.Value); err != nil { + if err := fileutils.Exists(s.UtsNS.Value); err != nil { return fmt.Errorf("cannot find specified UTS namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), s.UtsNS.Value); err != nil { @@ -114,7 +115,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt // Cgroup switch s.CgroupNS.NSMode { case specgen.Path: - if _, err := os.Stat(s.CgroupNS.Value); err != nil { + if err := fileutils.Exists(s.CgroupNS.Value); err != nil { return fmt.Errorf("cannot find specified cgroup namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), s.CgroupNS.Value); err != nil { @@ -133,7 +134,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt // Net switch s.NetNS.NSMode { case specgen.Path: - if _, err := os.Stat(s.NetNS.Value); err != nil { + if err := fileutils.Exists(s.NetNS.Value); err != 
nil { return fmt.Errorf("cannot find specified network namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), s.NetNS.Value); err != nil { diff --git a/pkg/specgen/generate/oci_linux.go b/pkg/specgen/generate/oci_linux.go index da9b30ec6b..d6247bbf67 100644 --- a/pkg/specgen/generate/oci_linux.go +++ b/pkg/specgen/generate/oci_linux.go @@ -254,24 +254,21 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt } var userDevices []spec.LinuxDevice - - if !s.IsPrivileged() { - // add default devices from containers.conf - for _, device := range rtc.Containers.Devices.Get() { - if err = DevicesFromPath(&g, device); err != nil { - return nil, err - } - } - if len(compatibleOptions.HostDeviceList) > 0 && len(s.Devices) == 0 { - userDevices = compatibleOptions.HostDeviceList - } else { - userDevices = s.Devices + // add default devices from containers.conf + for _, device := range rtc.Containers.Devices.Get() { + if err = DevicesFromPath(&g, device); err != nil { + return nil, err } - // add default devices specified by caller - for _, device := range userDevices { - if err = DevicesFromPath(&g, device.Path); err != nil { - return nil, err - } + } + if len(compatibleOptions.HostDeviceList) > 0 && len(s.Devices) == 0 { + userDevices = compatibleOptions.HostDeviceList + } else { + userDevices = s.Devices + } + // add default devices specified by caller + for _, device := range userDevices { + if err = DevicesFromPath(&g, device.Path); err != nil { + return nil, err } } s.HostDeviceList = userDevices diff --git a/pkg/specgen/generate/ports.go b/pkg/specgen/generate/ports.go index 0218050e03..0575fc7c5e 100644 --- a/pkg/specgen/generate/ports.go +++ b/pkg/specgen/generate/ports.go @@ -5,6 +5,7 @@ package generate import ( "fmt" "net" + "slices" "sort" "strings" @@ -14,7 +15,6 @@ import ( "github.com/containers/podman/v5/pkg/specgenutil" "github.com/containers/podman/v5/utils" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) const ( diff --git a/pkg/specgen/generate/security_linux.go b/pkg/specgen/generate/security_linux.go index a04508586c..3a8076b464 100644 --- a/pkg/specgen/generate/security_linux.go +++ b/pkg/specgen/generate/security_linux.go @@ -4,6 +4,7 @@ package generate import ( "fmt" + "slices" "strings" "github.com/containers/common/libimage" @@ -17,7 +18,6 @@ import ( "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // setLabelOpts sets the label options of the SecurityConfig according to the diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go index 9ff8700cd5..7703e90815 100644 --- a/pkg/specgen/generate/storage.go +++ b/pkg/specgen/generate/storage.go @@ -6,7 +6,7 @@ import ( "context" "errors" "fmt" - "os" + "io/fs" "path" "path/filepath" "strings" @@ -18,6 +18,7 @@ import ( "github.com/containers/podman/v5/libpod/define" "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -391,7 +392,7 @@ func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount, if s.Systemd == "always" { return mount, errors.New("cannot use container-init binary with systemd=always") } - if _, err := os.Stat(path); os.IsNotExist(err) { + if err := fileutils.Exists(path); errors.Is(err, fs.ErrNotExist) { 
return mount, fmt.Errorf("container-init binary not found on the host: %w", err) } return mount, nil diff --git a/pkg/specgen/generate/validate_linux.go b/pkg/specgen/generate/validate_linux.go index 25c249613a..3603b13757 100644 --- a/pkg/specgen/generate/validate_linux.go +++ b/pkg/specgen/generate/validate_linux.go @@ -13,6 +13,7 @@ import ( "github.com/containers/common/pkg/sysinfo" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/storage/pkg/fileutils" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -191,8 +192,8 @@ func verifyContainerResourcesCgroupV2(s *specgen.SpecGenerator) ([]string, error memoryMax := filepath.Join("/sys/fs/cgroup", own, "memory.max") memorySwapMax := filepath.Join("/sys/fs/cgroup", own, "memory.swap.max") - _, errMemoryMax := os.Stat(memoryMax) - _, errMemorySwapMax := os.Stat(memorySwapMax) + errMemoryMax := fileutils.Exists(memoryMax) + errMemorySwapMax := fileutils.Exists(memorySwapMax) // Differently than cgroup v1, the memory.*max files are not present in the // root directory, so we cannot query directly that, so as best effort use // the current cgroup. diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go index 4ff329922f..f685ab3e8c 100644 --- a/pkg/specgen/namespaces.go +++ b/pkg/specgen/namespaces.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" "net" - "os" + "slices" "strings" "github.com/containers/common/libnetwork/types" @@ -13,10 +13,10 @@ import ( "github.com/containers/podman/v5/pkg/namespaces" "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/util" + "github.com/containers/storage/pkg/fileutils" storageTypes "github.com/containers/storage/types" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" - "golang.org/x/exp/slices" ) type NamespaceMode string @@ -483,7 +483,7 @@ func SetupUserNS(idmappings *storageTypes.IDMappingOptions, userns Namespace, g var user string switch userns.NSMode { case Path: - if _, err := os.Stat(userns.Value); err != nil { + if err := fileutils.Exists(userns.Value); err != nil { return user, fmt.Errorf("cannot find specified user namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), userns.Value); err != nil { diff --git a/pkg/specgen/volumes.go b/pkg/specgen/volumes.go index 075711138c..d2c1e54876 100644 --- a/pkg/specgen/volumes.go +++ b/pkg/specgen/volumes.go @@ -53,6 +53,9 @@ type ImageVolume struct { Destination string // ReadWrite sets the volume writable. ReadWrite bool + // SubPath mounts a particular path within the image. + // If empty, the whole image is mounted. 
+ SubPath string `json:"subPath,omitempty"` } // GenVolumeMounts parses user input into mounts, volumes and overlay volumes diff --git a/pkg/specgen/winpath_linux.go b/pkg/specgen/winpath_linux.go index 16c62492d2..f33b3002e3 100644 --- a/pkg/specgen/winpath_linux.go +++ b/pkg/specgen/winpath_linux.go @@ -1,9 +1,8 @@ package specgen import ( - "os" - "github.com/containers/common/pkg/machine" + "github.com/containers/storage/pkg/fileutils" ) func shouldResolveWinPaths() bool { @@ -11,8 +10,7 @@ func shouldResolveWinPaths() bool { } func shouldResolveUnixWinVariant(path string) bool { - _, err := os.Stat(path) - return err != nil + return fileutils.Exists(path) != nil } func resolveRelativeOnWindows(path string) string { diff --git a/pkg/specgen/winpath_windows.go b/pkg/specgen/winpath_windows.go index c6aad314a2..168e30062c 100644 --- a/pkg/specgen/winpath_windows.go +++ b/pkg/specgen/winpath_windows.go @@ -1,9 +1,10 @@ package specgen import ( - "github.com/sirupsen/logrus" - "os" "path/filepath" + + "github.com/containers/storage/pkg/fileutils" + "github.com/sirupsen/logrus" ) func shouldResolveUnixWinVariant(path string) bool { @@ -25,6 +26,5 @@ func resolveRelativeOnWindows(path string) string { } func winPathExists(path string) bool { - _, err := os.Stat(path) - return err == nil + return fileutils.Exists(path) == nil } diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go index 912193b453..c9dc0775d5 100644 --- a/pkg/specgenutil/specgen.go +++ b/pkg/specgenutil/specgen.go @@ -222,7 +222,8 @@ func setNamespaces(rtc *config.Config, s *specgen.SpecGenerator, c *entities.Con } } userns := c.UserNS - if userns == "" && c.Pod == "" { + // caller must make sure s.Pod is set before calling this function. + if userns == "" && s.Pod == "" { if ns, ok := os.LookupEnv("PODMAN_USERNS"); ok { userns = ns } else { @@ -388,6 +389,22 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.StartupHealthConfig.Successes = int(c.StartupHCSuccesses) } + if len(s.Pod) == 0 || len(c.Pod) > 0 { + s.Pod = c.Pod + } + + if len(c.PodIDFile) > 0 { + if len(s.Pod) > 0 { + return errors.New("cannot specify both --pod and --pod-id-file") + } + podID, err := ReadPodIDFile(c.PodIDFile) + if err != nil { + return err + } + s.Pod = podID + } + + // Important s.Pod must be set above here. 
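// A toy model of the guard that motivates the reordering above: the
// PODMAN_USERNS fallback in setNamespaces only applies when the container is
// not joining a pod, and that check now reads the resolved s.Pod rather than
// the raw --pod flag, so a pod supplied via --pod-id-file is seen as well.
// The function and values below are illustrative only.
package main

import "fmt"

func defaultUserNSApplies(explicitUserNS, resolvedPod string) bool {
	// mirrors: if userns == "" && s.Pod == "" { ... }
	return explicitUserNS == "" && resolvedPod == ""
}

func main() {
	fmt.Println(defaultUserNSApplies("", ""))        // true: standalone container, no --userns given
	fmt.Println(defaultUserNSApplies("", "mypod"))   // false: the container joins a pod
	fmt.Println(defaultUserNSApplies("keep-id", "")) // false: an explicit --userns wins
}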
if err := setNamespaces(rtc, s, c); err != nil { return err } @@ -408,21 +425,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.PublishExposedPorts = &c.PublishAll } - if len(s.Pod) == 0 || len(c.Pod) > 0 { - s.Pod = c.Pod - } - - if len(c.PodIDFile) > 0 { - if len(s.Pod) > 0 { - return errors.New("cannot specify both --pod and --pod-id-file") - } - podID, err := ReadPodIDFile(c.PodIDFile) - if err != nil { - return err - } - s.Pod = podID - } - expose, err := CreateExpose(c.Expose) if err != nil { return err diff --git a/pkg/specgenutil/volumes.go b/pkg/specgenutil/volumes.go index c481867163..510b11254b 100644 --- a/pkg/specgenutil/volumes.go +++ b/pkg/specgenutil/volumes.go @@ -611,6 +611,14 @@ func getImageVolume(args []string) (*specgen.ImageVolume, error) { default: return nil, fmt.Errorf("invalid rw value %q: %w", value, util.ErrBadMntOption) } + case "subpath": + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) + } + if !filepath.IsAbs(value) { + return nil, fmt.Errorf("volume subpath %q must be an absolute path", value) + } + newVolume.SubPath = value case "consistency": // Often used on MACs and mistakenly on Linux platforms. // Since Docker ignores this option so shall we. diff --git a/pkg/systemd/notifyproxy/notifyproxy.go b/pkg/systemd/notifyproxy/notifyproxy.go index 240aed3129..c8ea4748be 100644 --- a/pkg/systemd/notifyproxy/notifyproxy.go +++ b/pkg/systemd/notifyproxy/notifyproxy.go @@ -1,3 +1,5 @@ +//go:build !windows + package notifyproxy import ( diff --git a/pkg/systemd/notifyproxy/notifyproxy_test.go b/pkg/systemd/notifyproxy/notifyproxy_test.go index aaada1ad30..e6767ab34d 100644 --- a/pkg/systemd/notifyproxy/notifyproxy_test.go +++ b/pkg/systemd/notifyproxy/notifyproxy_test.go @@ -1,3 +1,5 @@ +//go:build !windows + package notifyproxy import ( diff --git a/pkg/systemd/parser/split.go b/pkg/systemd/parser/split.go index c2778032a9..433e869a65 100644 --- a/pkg/systemd/parser/split.go +++ b/pkg/systemd/parser/split.go @@ -428,13 +428,17 @@ func splitString(s string, separators string, flags SplitFlags) ([]string, error return splitStringAppend(make([]string, 0), s, separators, flags) } -func charNeedEscape(c rune) bool { +func charNeedEscape(c rune, isPath bool) bool { if c > 128 { return false /* unicode is ok */ } + pathRune := (isPath && c == '-') || + (isPath && c == '/') + return unicode.IsSpace(c) || unicode.IsControl(c) || + pathRune || c == '"' || c == '\'' || c == '\\' @@ -442,18 +446,22 @@ func charNeedEscape(c rune) bool { func wordNeedEscape(word string) bool { for _, c := range word { - if charNeedEscape(c) { + if charNeedEscape(c, false) { return true } } - return false } func appendEscapeWord(escaped *strings.Builder, word string) { escaped.WriteRune('"') - for _, c := range word { - if charNeedEscape(c) { + escapeString(escaped, word, false) + escaped.WriteRune('"') +} + +func escapeString(escaped *strings.Builder, word string, isPath bool) { + for i, c := range word { + if charNeedEscape(c, isPath) { switch c { case '\a': escaped.WriteString("\\a") @@ -477,6 +485,10 @@ func appendEscapeWord(escaped *strings.Builder, word string) { escaped.WriteString("\\\"") case '\'': escaped.WriteString("'") + case '/': + if isPath && i != 0 { + escaped.WriteString("-") + } default: escaped.WriteString(fmt.Sprintf("\\x%.2x", c)) } @@ -484,7 +496,6 @@ func appendEscapeWord(escaped *strings.Builder, word string) { escaped.WriteRune(c) } } - escaped.WriteRune('"') } func escapeWords(words []string) string { @@ 
-500,6 +511,5 @@ func escapeWords(words []string) string { escaped.WriteString(word) } } - return escaped.String() } diff --git a/pkg/systemd/parser/unitfile.go b/pkg/systemd/parser/unitfile.go index b7b47d54a1..9ecbf4a158 100644 --- a/pkg/systemd/parser/unitfile.go +++ b/pkg/systemd/parser/unitfile.go @@ -90,6 +90,11 @@ func (g *unitGroup) addLine(line *unitLine) { g.lines = append(g.lines, line) } +func (g *unitGroup) prependLine(line *unitLine) { + n := []*unitLine{line} + g.lines = append(n, g.lines...) +} + func (g *unitGroup) addComment(line *unitLine) { g.comments = append(g.comments, line) } @@ -923,6 +928,17 @@ func (f *UnitFile) PrependComment(groupName string, comments ...string) { } } +func (f *UnitFile) PrependUnitLine(groupName string, key string, value string) { + var group *unitGroup + if groupName == "" && len(f.groups) > 0 { + group = f.groups[0] + } else { + // Uses magic "" for first comment-only group if no other groups + group = f.ensureGroup(groupName) + } + group.prependLine(newUnitLine(key, value, false)) +} + func (f *UnitFile) GetTemplateParts() (string, string) { ext := filepath.Ext(f.Filename) basename := strings.TrimSuffix(f.Filename, ext) @@ -932,3 +948,9 @@ func (f *UnitFile) GetTemplateParts() (string, string) { } return parts[0] + "@" + ext, parts[1] } + +func PathEscape(path string) string { + var escaped strings.Builder + escapeString(&escaped, path, true) + return escaped.String() +} diff --git a/pkg/systemd/quadlet/quadlet.go b/pkg/systemd/quadlet/quadlet.go index 514d1cf854..0edd38cd07 100644 --- a/pkg/systemd/quadlet/quadlet.go +++ b/pkg/systemd/quadlet/quadlet.go @@ -11,6 +11,7 @@ import ( "github.com/containers/podman/v5/pkg/specgenutilexternal" "github.com/containers/podman/v5/pkg/systemd/parser" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/regexp" ) @@ -34,12 +35,14 @@ const ( UnitGroup = "Unit" VolumeGroup = "Volume" ImageGroup = "Image" + BuildGroup = "Build" XContainerGroup = "X-Container" XKubeGroup = "X-Kube" XNetworkGroup = "X-Network" XPodGroup = "X-Pod" XVolumeGroup = "X-Volume" XImageGroup = "X-Image" + XBuildGroup = "X-Build" ) // Systemd Unit file keys @@ -77,10 +80,13 @@ const ( KeyExec = "Exec" KeyExitCodePropagation = "ExitCodePropagation" KeyExposeHostPort = "ExposeHostPort" + KeyFile = "File" + KeyForceRM = "ForceRM" KeyGateway = "Gateway" KeyGIDMap = "GIDMap" KeyGlobalArgs = "GlobalArgs" KeyGroup = "Group" + KeyGroupAdd = "GroupAdd" KeyHealthCmd = "HealthCmd" KeyHealthInterval = "HealthInterval" KeyHealthOnFailure = "HealthOnFailure" @@ -104,6 +110,7 @@ const ( KeyKubeDownForce = "KubeDownForce" KeyLabel = "Label" KeyLogDriver = "LogDriver" + KeyLogOpt = "LogOpt" KeyMask = "Mask" KeyMount = "Mount" KeyNetwork = "Network" @@ -140,6 +147,7 @@ const ( KeySubnet = "Subnet" KeySubUIDMap = "SubUIDMap" KeySysctl = "Sysctl" + KeyTarget = "Target" KeyTimezone = "Timezone" KeyTLSVerify = "TLSVerify" KeyTmpfs = "Tmpfs" @@ -163,6 +171,7 @@ type PodInfo struct { } var ( + URL = regexp.Delayed(`^((https?)|(git)://)|(github\.com/).+$`) validPortRange = regexp.Delayed(`\d+(-\d+)?(/udp|/tcp)?$`) // Supported keys in "Container" group @@ -186,6 +195,7 @@ var ( KeyGIDMap: true, KeyGlobalArgs: true, KeyGroup: true, + KeyGroupAdd: true, KeyHealthCmd: true, KeyHealthInterval: true, KeyHealthOnFailure: true, @@ -203,6 +213,7 @@ var ( KeyImage: true, KeyLabel: true, KeyLogDriver: true, + KeyLogOpt: true, KeyMask: true, KeyMount: true, KeyNetwork: true, @@ -290,6 +301,7 @@ var ( KeyGlobalArgs: true, KeyKubeDownForce: 
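// PathEscape above is a thin wrapper over escapeString(..., isPath=true): the
// leading '/' is dropped, every further '/' becomes '-', and a literal '-' is
// hex-escaped, roughly matching systemd's own path escaping. A small
// illustration with a made-up path; PrependUnitLine, also added above, is used
// later in this patch to place the default Wants=/After=network-online.target
// lines ahead of any user-provided ones so that the user's values win.
package main

import (
	"fmt"

	"github.com/containers/podman/v5/pkg/systemd/parser"
)

func main() {
	fmt.Println(parser.PathEscape("/srv/web-data")) // prints: srv-web\x2ddata
}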
true, KeyLogDriver: true, + KeyLogOpt: true, KeyNetwork: true, KeyPodmanArgs: true, KeyPublishPort: true, @@ -320,6 +332,33 @@ var ( KeyVariant: true, } + // Supported keys in "Build" group + supportedBuildKeys = map[string]bool{ + KeyAnnotation: true, + KeyArch: true, + KeyAuthFile: true, + KeyContainersConfModule: true, + KeyDNS: true, + KeyDNSOption: true, + KeyDNSSearch: true, + KeyEnvironment: true, + KeyFile: true, + KeyForceRM: true, + KeyGlobalArgs: true, + KeyGroupAdd: true, + KeyImageTag: true, + KeyLabel: true, + KeyNetwork: true, + KeyPodmanArgs: true, + KeyPull: true, + KeySecret: true, + KeySetWorkingDirectory: true, + KeyTarget: true, + KeyTLSVerify: true, + KeyVariant: true, + KeyVolume: true, + } + supportedPodKeys = map[string]bool{ KeyContainersConfModule: true, KeyGlobalArgs: true, @@ -342,6 +381,10 @@ func replaceExtension(name string, extension string, extraPrefix string, extraSu return extraPrefix + baseName + extraSuffix + extension } +func isURL(urlCandidate string) bool { + return URL.MatchString(urlCandidate) +} + func isPortRange(port string) bool { return validPortRange.MatchString(port) } @@ -408,6 +451,14 @@ func ConvertContainer(container *parser.UnitFile, names map[string]string, isUse service := container.Dup() service.Filename = replaceExtension(container.Filename, ".service", "", "") + // Add a dependency on network-online.target so the image pull does not happen + // before network is ready + // https://github.com/containers/podman/issues/21873 + // Prepend the lines, so the user-provided values + // override the default ones. + service.PrependUnitLine(UnitGroup, "After", "network-online.target") + service.PrependUnitLine(UnitGroup, "Wants", "network-online.target") + if container.Path != "" { service.Add(UnitGroup, "SourcePath", container.Path) } @@ -440,7 +491,7 @@ func ConvertContainer(container *parser.UnitFile, names map[string]string, isUse if !ok || len(containerName) == 0 { // By default, We want to name the container by the service name if strings.Contains(container.Filename, "@") { - containerName = "systemd-%P_%I" + containerName = "systemd-%p_%i" } else { containerName = "systemd-%N" } @@ -496,6 +547,7 @@ func ConvertContainer(container *parser.UnitFile, names map[string]string, isUse ) handleLogDriver(container, ContainerGroup, podman) + handleLogOpt(container, ContainerGroup, podman) // We delegate groups to the runtime service.Add(ServiceGroup, "Delegate", "yes") @@ -589,7 +641,7 @@ func ConvertContainer(container *parser.UnitFile, names map[string]string, isUse for _, device := range devices { if device[0] == '-' { device = device[1:] - _, err := os.Stat(strings.Split(device, ":")[0]) + err := fileutils.Exists(strings.Split(device, ":")[0]) if errors.Is(err, os.ErrNotExist) { continue } @@ -671,6 +723,13 @@ func ConvertContainer(container *parser.UnitFile, names map[string]string, isUse return nil, err } + groupsAdd := container.LookupAll(ContainerGroup, KeyGroupAdd) + for _, groupAdd := range groupsAdd { + if len(groupAdd) > 0 { + podman.addf("--group-add=%s", groupAdd) + } + } + tmpfsValues := container.LookupAll(ContainerGroup, KeyTmpfs) for _, tmpfs := range tmpfsValues { if strings.Count(tmpfs, ":") > 1 { @@ -1109,6 +1168,7 @@ func ConvertKube(kube *parser.UnitFile, names map[string]string, isUser bool) (* } handleLogDriver(kube, KubeGroup, execStart) + handleLogOpt(kube, KubeGroup, execStart) if err := handleUserMappings(kube, KubeGroup, execStart, isUser, false); err != nil { return nil, err @@ -1160,7 +1220,7 @@ func 
ConvertKube(kube *parser.UnitFile, names map[string]string, isUser bool) (* execStop.add(yamlPath) service.AddCmdline(ServiceGroup, "ExecStopPost", execStop.Args) - err = handleSetWorkingDirectory(kube, service) + _, err = handleSetWorkingDirectory(kube, service, KubeGroup) if err != nil { return nil, err } @@ -1172,6 +1232,14 @@ func ConvertImage(image *parser.UnitFile) (*parser.UnitFile, string, error) { service := image.Dup() service.Filename = replaceExtension(image.Filename, ".service", "", "-image") + // Add a dependency on network-online.target so the image pull does not happen + // before network is ready + // https://github.com/containers/podman/issues/21873 + // Prepend the lines, so the user-provided values + // override the default ones. + service.PrependUnitLine(UnitGroup, "After", "network-online.target") + service.PrependUnitLine(UnitGroup, "Wants", "network-online.target") + if image.Path != "" { service.Add(UnitGroup, "SourcePath", image.Path) } @@ -1238,6 +1306,157 @@ func ConvertImage(image *parser.UnitFile) (*parser.UnitFile, string, error) { return service, imageName, nil } +func ConvertBuild(build *parser.UnitFile, names map[string]string) (*parser.UnitFile, string, error) { + service := build.Dup() + service.Filename = replaceExtension(build.Filename, ".service", "", "-build") + + // Add a dependency on network-online.target so the image pull does not happen + // before network is ready + // https://github.com/containers/podman/issues/21873 + // Prepend the lines, so the user-provided values + // override the default ones. + service.PrependUnitLine(UnitGroup, "After", "network-online.target") + service.PrependUnitLine(UnitGroup, "Wants", "network-online.target") + + /* Rename old Build group to X-Build so that systemd ignores it */ + service.RenameGroup(BuildGroup, XBuildGroup) + + // Need the containers filesystem mounted to start podman + service.Add(UnitGroup, "RequiresMountsFor", "%t/containers") + + if build.Path != "" { + service.Add(UnitGroup, "SourcePath", build.Path) + } + + if err := checkForUnknownKeys(build, BuildGroup, supportedBuildKeys); err != nil { + return nil, "", err + } + + podman := createBasePodmanCommand(build, BuildGroup) + podman.add("build") + + stringKeys := map[string]string{ + KeyArch: "--arch", + KeyAuthFile: "--authfile", + KeyPull: "--pull", + KeyTarget: "--target", + KeyVariant: "--variant", + } + + boolKeys := map[string]string{ + KeyTLSVerify: "--tls-verify", + KeyForceRM: "--force-rm", + } + + for key, flag := range stringKeys { + lookupAndAddString(build, BuildGroup, key, flag, podman) + } + + for key, flag := range boolKeys { + lookupAndAddBoolean(build, BuildGroup, key, flag, podman) + } + + annotations := build.LookupAllKeyVal(BuildGroup, KeyAnnotation) + podman.addAnnotations(annotations) + + dns := build.LookupAll(BuildGroup, KeyDNS) + for _, ipAddr := range dns { + podman.addf("--dns=%s", ipAddr) + } + + dnsOptions := build.LookupAll(BuildGroup, KeyDNSOption) + for _, dnsOption := range dnsOptions { + podman.addf("--dns-option=%s", dnsOption) + } + + dnsSearches := build.LookupAll(BuildGroup, KeyDNSSearch) + for _, dnsSearch := range dnsSearches { + podman.addf("--dns-search=%s", dnsSearch) + } + + podmanEnv := build.LookupAllKeyVal(BuildGroup, KeyEnvironment) + podman.addEnv(podmanEnv) + + groupsAdd := build.LookupAll(BuildGroup, KeyGroupAdd) + for _, groupAdd := range groupsAdd { + if len(groupAdd) > 0 { + podman.addf("--group-add=%s", groupAdd) + } + } + + labels := build.LookupAllKeyVal(BuildGroup, KeyLabel) + 
podman.addLabels(labels) + + builtImageName, ok := names[build.Filename] + if !ok { + return nil, "", fmt.Errorf("no ImageTag key specified") + } + + podman.addf("--tag=%s", builtImageName) + + addNetworks(build, BuildGroup, service, names, podman) + + secrets := build.LookupAllArgs(BuildGroup, KeySecret) + for _, secret := range secrets { + podman.add("--secret", secret) + } + + if err := addVolumes(build, service, BuildGroup, names, podman); err != nil { + return nil, "", err + } + + // In order to build an image locally, we need either a File key pointing directly at a + // Containerfile, or we need a context or WorkingDirectory containing all required files. + // SetWorkingDirectory= can also be a path, a URL to either a Containerfile, a Git repo, or + // an archive. + context, err := handleSetWorkingDirectory(build, service, BuildGroup) + if err != nil { + return nil, "", err + } + + workingDirectory, okWD := service.Lookup(ServiceGroup, ServiceKeyWorkingDirectory) + filePath, okFile := build.Lookup(BuildGroup, KeyFile) + if (!okWD || len(workingDirectory) == 0) && (!okFile || len(filePath) == 0) && len(context) == 0 { + return nil, "", fmt.Errorf("neither SetWorkingDirectory, nor File key specified") + } + + if len(filePath) > 0 { + podman.addf("--file=%s", filePath) + } + + handlePodmanArgs(build, BuildGroup, podman) + + // Context or WorkingDirectory has to be last argument + if len(context) > 0 { + podman.add(context) + } else if !filepath.IsAbs(filePath) && !isURL(filePath) { + // Special handling for relative filePaths + if len(workingDirectory) == 0 { + return nil, "", fmt.Errorf("relative path in File key requires SetWorkingDirectory key to be set") + } + podman.add(workingDirectory) + } + + service.AddCmdline(ServiceGroup, "ExecStart", podman.Args) + + service.Setv(ServiceGroup, + "Type", "oneshot", + "RemainAfterExit", "yes", + + // The default syslog identifier is the exec basename (podman) + // which isn't very useful here + "SyslogIdentifier", "%N") + + return service, builtImageName, nil +} + +func GetBuiltImageName(buildUnit *parser.UnitFile) string { + if builtImageName, ok := buildUnit.Lookup(BuildGroup, KeyImageTag); ok { + return builtImageName + } + return "" +} + func GetPodServiceName(podUnit *parser.UnitFile) string { return replaceExtension(podUnit.Filename, "", "", "-pod") } @@ -1599,6 +1818,13 @@ func handleLogDriver(unitFile *parser.UnitFile, groupName string, podman *Podman } } +func handleLogOpt(unitFile *parser.UnitFile, groupName string, podman *PodmanCmdline) { + logOpts := unitFile.LookupAllStrv(groupName, KeyLogOpt) + for _, logOpt := range logOpts { + podman.add("--log-opt", logOpt) + } +} + func handleStorageSource(quadletUnitFile, serviceUnitFile *parser.UnitFile, source string, names map[string]string) (string, error) { if source[0] == '.' 
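// handleLogOpt above mirrors handleLogDriver: every LogOpt value looked up from
// the given group (the Container and Kube groups in this patch) is forwarded as
// a repeated "--log-opt <value>" argument pair, so a unit carrying the
// hypothetical lines
//
//	LogOpt=path=/var/log/app.log
//	LogOpt=max-size=10mb
//
// adds "--log-opt path=/var/log/app.log --log-opt max-size=10mb" to the
// generated command line.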
{ var err error @@ -1660,39 +1886,67 @@ func handlePodmanArgs(unitFile *parser.UnitFile, groupName string, podman *Podma } } -func handleSetWorkingDirectory(kube, serviceUnitFile *parser.UnitFile) error { - // If WorkingDirectory is already set in the Service section do not change it - workingDir, ok := kube.Lookup(ServiceGroup, ServiceKeyWorkingDirectory) - if ok && len(workingDir) > 0 { - return nil - } - - setWorkingDirectory, ok := kube.Lookup(KubeGroup, KeySetWorkingDirectory) +func handleSetWorkingDirectory(quadletUnitFile, serviceUnitFile *parser.UnitFile, quadletGroup string) (string, error) { + setWorkingDirectory, ok := quadletUnitFile.Lookup(quadletGroup, KeySetWorkingDirectory) if !ok || len(setWorkingDirectory) == 0 { - return nil + return "", nil } var relativeToFile string + var context string switch strings.ToLower(setWorkingDirectory) { case "yaml": - relativeToFile, ok = kube.Lookup(KubeGroup, KeyYaml) + if quadletGroup != KubeGroup { + return "", fmt.Errorf("SetWorkingDirectory=%s is only supported in .kube files", setWorkingDirectory) + } + + relativeToFile, ok = quadletUnitFile.Lookup(quadletGroup, KeyYaml) + if !ok { + return "", fmt.Errorf("no Yaml key specified") + } + case "file": + if quadletGroup != BuildGroup { + return "", fmt.Errorf("SetWorkingDirectory=%s is only supported in .build files", setWorkingDirectory) + } + + relativeToFile, ok = quadletUnitFile.Lookup(quadletGroup, KeyFile) if !ok { - return fmt.Errorf("no Yaml key specified") + return "", fmt.Errorf("no File key specified") } case "unit": - relativeToFile = kube.Path + relativeToFile = quadletUnitFile.Path default: - return fmt.Errorf("unsupported value for %s: %s ", ServiceKeyWorkingDirectory, setWorkingDirectory) - } + // Path / URL handling is for .build files only + if quadletGroup != BuildGroup { + return "", fmt.Errorf("unsupported value for %s: %s ", ServiceKeyWorkingDirectory, setWorkingDirectory) + } - fileInWorkingDir, err := getAbsolutePath(kube, relativeToFile) - if err != nil { - return err + // Any value other than the above cases will be returned as context + context = setWorkingDirectory + + // If we have a relative path, set the WorkingDirectory to that of the + // quadletUnitFile + if !filepath.IsAbs(context) { + relativeToFile = quadletUnitFile.Path + } } - serviceUnitFile.Add(ServiceGroup, ServiceKeyWorkingDirectory, filepath.Dir(fileInWorkingDir)) + if len(relativeToFile) > 0 && !isURL(context) { + // If WorkingDirectory is already set in the Service section do not change it + workingDir, ok := quadletUnitFile.Lookup(ServiceGroup, ServiceKeyWorkingDirectory) + if ok && len(workingDir) > 0 { + return "", nil + } - return nil + fileInWorkingDir, err := getAbsolutePath(quadletUnitFile, relativeToFile) + if err != nil { + return "", err + } + + serviceUnitFile.Add(ServiceGroup, ServiceKeyWorkingDirectory, filepath.Dir(fileInWorkingDir)) + } + + return context, nil } func lookupAndAddString(unit *parser.UnitFile, group, key, flag string, podman *PodmanCmdline) { @@ -1710,20 +1964,22 @@ func lookupAndAddBoolean(unit *parser.UnitFile, group, key, flag string, podman } func handleImageSource(quadletImageName string, serviceUnitFile *parser.UnitFile, names map[string]string) (string, error) { - if strings.HasSuffix(quadletImageName, ".image") { - // since there is no default name conversion, the actual image name must exist in the names map - imageName, ok := names[quadletImageName] - if !ok { - return "", fmt.Errorf("requested Quadlet image %s was not found", imageName) - } + for _, 
suffix := range []string{".build", ".image"} { + if strings.HasSuffix(quadletImageName, suffix) { + // since there is no default name conversion, the actual image name must exist in the names map + imageName, ok := names[quadletImageName] + if !ok { + return "", fmt.Errorf("requested Quadlet image %s was not found", quadletImageName) + } - // the systemd unit name is $name-image.service - imageServiceName := replaceExtension(quadletImageName, ".service", "", "-image") + // the systemd unit name is $name-$suffix.service + imageServiceName := replaceExtension(quadletImageName, ".service", "", fmt.Sprintf("-%s", suffix[1:])) - serviceUnitFile.Add(UnitGroup, "Requires", imageServiceName) - serviceUnitFile.Add(UnitGroup, "After", imageServiceName) + serviceUnitFile.Add(UnitGroup, "Requires", imageServiceName) + serviceUnitFile.Add(UnitGroup, "After", imageServiceName) - quadletImageName = imageName + quadletImageName = imageName + } } return quadletImageName, nil diff --git a/pkg/terminal/console_windows.go b/pkg/terminal/console_windows.go index b352de5029..9a2ac58f9f 100644 --- a/pkg/terminal/console_windows.go +++ b/pkg/terminal/console_windows.go @@ -25,6 +25,7 @@ func setConsoleMode(handle windows.Handle, flags uint32) error { var mode uint32 err := windows.GetConsoleMode(handle, &mode) if err != nil { + //nolint:nilerr return nil // not a terminal } if err := windows.SetConsoleMode(handle, mode|flags); err != nil { diff --git a/pkg/trust/policy.go b/pkg/trust/policy.go index 1424ffe91e..42532e6132 100644 --- a/pkg/trust/policy.go +++ b/pkg/trust/policy.go @@ -15,6 +15,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -61,7 +62,7 @@ func DefaultPolicyPath(sys *types.SystemContext) string { } userPolicyFilePath := filepath.Join(homedir.Get(), filepath.FromSlash(".config/containers/policy.json")) - _, err := os.Stat(userPolicyFilePath) + err := fileutils.Exists(userPolicyFilePath) if err == nil { return userPolicyFilePath } @@ -218,7 +219,7 @@ func AddPolicyEntries(policyPath string, input AddPolicyEntriesInput) error { return err } - _, err = os.Stat(policyPath) + err = fileutils.Exists(policyPath) if !os.IsNotExist(err) { policyContent, err := os.ReadFile(policyPath) if err != nil { diff --git a/pkg/trust/registries.go b/pkg/trust/registries.go index 959feb7267..d4516f7fcf 100644 --- a/pkg/trust/registries.go +++ b/pkg/trust/registries.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/docker/docker/pkg/homedir" "sigs.k8s.io/yaml" ) @@ -39,7 +40,7 @@ func RegistriesDirPath(sys *types.SystemContext) string { return sys.RegistriesDirPath } userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) - if _, err := os.Stat(userRegistriesDirPath); err == nil { + if err := fileutils.Exists(userRegistriesDirPath); err == nil { return userRegistriesDirPath } if sys != nil && sys.RootForImplicitAbsolutePaths != "" { diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go deleted file mode 100644 index 2ae1fbbede..0000000000 --- a/pkg/util/mountOpts.go +++ /dev/null @@ -1,222 +0,0 @@ -package util - -import ( - "errors" - "fmt" - "strings" - - "github.com/containers/podman/v5/libpod/define" - "github.com/containers/podman/v5/pkg/rootless" -) - -var ( - // ErrBadMntOption indicates that an invalid mount option was passed. 
- ErrBadMntOption = errors.New("invalid mount option") - // ErrDupeMntOption indicates that a duplicate mount option was passed. - ErrDupeMntOption = errors.New("duplicate mount option passed") -) - -type defaultMountOptions struct { - noexec bool - nosuid bool - nodev bool -} - -// ProcessOptions parses the options for a bind or tmpfs mount and ensures that -// they are sensible and follow convention. The isTmpfs variable controls -// whether extra, tmpfs-specific options will be allowed. -// The sourcePath variable, if not empty, contains a bind mount source. -func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { - var ( - foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy, foundNoSwap, foundNoDereference bool - ) - - newOptions := make([]string, 0, len(options)) - for _, opt := range options { - // Some options have parameters - size, mode - key, _, _ := strings.Cut(opt, "=") - - // add advanced options such as upperdir=/path and workdir=/path, when overlay is specified - if foundOverlay { - if strings.Contains(opt, "upperdir") { - newOptions = append(newOptions, opt) - continue - } - if strings.Contains(opt, "workdir") { - newOptions = append(newOptions, opt) - continue - } - } - if strings.HasPrefix(key, "subpath") { - newOptions = append(newOptions, opt) - continue - } - if strings.HasPrefix(key, "idmap") { - if foundIdmap { - return nil, fmt.Errorf("the 'idmap' option can only be set once: %w", ErrDupeMntOption) - } - foundIdmap = true - newOptions = append(newOptions, opt) - continue - } - - switch key { - case "copy", "nocopy": - if foundCopy { - return nil, fmt.Errorf("only one of 'nocopy' and 'copy' can be used: %w", ErrDupeMntOption) - } - foundCopy = true - case "O": - foundOverlay = true - case "volume-opt": - // Volume-opt should be relayed and processed by driver. 
- newOptions = append(newOptions, opt) - case "exec", "noexec": - if foundExec { - return nil, fmt.Errorf("only one of 'noexec' and 'exec' can be used: %w", ErrDupeMntOption) - } - foundExec = true - case "suid", "nosuid": - if foundSuid { - return nil, fmt.Errorf("only one of 'nosuid' and 'suid' can be used: %w", ErrDupeMntOption) - } - foundSuid = true - case "nodev", "dev": - if foundDev { - return nil, fmt.Errorf("only one of 'nodev' and 'dev' can be used: %w", ErrDupeMntOption) - } - foundDev = true - case "rw", "ro": - if foundWrite { - return nil, fmt.Errorf("only one of 'rw' and 'ro' can be used: %w", ErrDupeMntOption) - } - foundWrite = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared", "unbindable", "runbindable": - if foundProp { - return nil, fmt.Errorf("only one root propagation mode can be used: %w", ErrDupeMntOption) - } - foundProp = true - case "size": - if !isTmpfs { - return nil, fmt.Errorf("the 'size' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundSize { - return nil, fmt.Errorf("only one tmpfs size can be specified: %w", ErrDupeMntOption) - } - foundSize = true - case "mode": - if !isTmpfs { - return nil, fmt.Errorf("the 'mode' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundMode { - return nil, fmt.Errorf("only one tmpfs mode can be specified: %w", ErrDupeMntOption) - } - foundMode = true - case "tmpcopyup": - if !isTmpfs { - return nil, fmt.Errorf("the 'tmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundCopyUp { - return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) - } - foundCopyUp = true - case "consistency": - // Often used on MACs and mistakenly on Linux platforms. - // Since Docker ignores this option so shall we. 
- continue - case "notmpcopyup": - if !isTmpfs { - return nil, fmt.Errorf("the 'notmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundCopyUp { - return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) - } - foundCopyUp = true - // do not propagate notmpcopyup to the OCI runtime - continue - case "noswap": - - if !isTmpfs { - return nil, fmt.Errorf("the 'noswap' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if rootless.IsRootless() { - return nil, fmt.Errorf("the 'noswap' option is only allowed with rootful tmpfs mounts: %w", ErrBadMntOption) - } - if foundNoSwap { - return nil, fmt.Errorf("the 'tmpswap' option can only be set once: %w", ErrDupeMntOption) - } - foundNoSwap = true - newOptions = append(newOptions, opt) - continue - case "no-dereference": - if foundNoDereference { - return nil, fmt.Errorf("the 'no-dereference' option can only be set once: %w", ErrDupeMntOption) - } - foundNoDereference = true - case define.TypeBind, "rbind": - if isTmpfs { - return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundBind { - return nil, fmt.Errorf("only one of 'rbind' and 'bind' can be used: %w", ErrDupeMntOption) - } - foundBind = true - case "z", "Z": - if isTmpfs { - return nil, fmt.Errorf("the 'z' and 'Z' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) - } - if foundZ { - return nil, fmt.Errorf("only one of 'z' and 'Z' can be used: %w", ErrDupeMntOption) - } - foundZ = true - case "U": - if foundU { - return nil, fmt.Errorf("the 'U' option can only be set once: %w", ErrDupeMntOption) - } - foundU = true - default: - return nil, fmt.Errorf("unknown mount option %q: %w", opt, ErrBadMntOption) - } - newOptions = append(newOptions, opt) - } - - if !foundWrite { - newOptions = append(newOptions, "rw") - } - if !foundProp { - newOptions = append(newOptions, "rprivate") - } - defaults, err := getDefaultMountOptions(sourcePath) - if err != nil { - return nil, err - } - if !foundExec && defaults.noexec { - newOptions = append(newOptions, "noexec") - } - if !foundSuid && defaults.nosuid { - newOptions = append(newOptions, "nosuid") - } - if !foundDev && defaults.nodev { - newOptions = append(newOptions, "nodev") - } - if isTmpfs && !foundCopyUp { - newOptions = append(newOptions, "tmpcopyup") - } - if !isTmpfs && !foundBind { - newOptions = append(newOptions, "rbind") - } - - return newOptions, nil -} - -func ParseDriverOpts(option string) (string, string, error) { - _, val, hasVal := strings.Cut(option, "=") - if !hasVal { - return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) - } - optKey, optVal, hasOptVal := strings.Cut(val, "=") - if !hasOptVal { - return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) - } - return optKey, optVal, nil -} diff --git a/pkg/util/mount_opts.go b/pkg/util/mount_opts.go new file mode 100644 index 0000000000..c9a773093e --- /dev/null +++ b/pkg/util/mount_opts.go @@ -0,0 +1,237 @@ +package util + +import ( + "errors" + "fmt" + "strings" + + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" +) + +var ( + // ErrBadMntOption indicates that an invalid mount option was passed. + ErrBadMntOption = errors.New("invalid mount option") + // ErrDupeMntOption indicates that a duplicate mount option was passed. 
+ ErrDupeMntOption = errors.New("duplicate mount option passed") +) + +type defaultMountOptions struct { + noexec bool + nosuid bool + nodev bool +} + +type getDefaultMountOptionsFn func(path string) (defaultMountOptions, error) + +// ProcessOptions parses the options for a bind or tmpfs mount and ensures that +// they are sensible and follow convention. The isTmpfs variable controls +// whether extra, tmpfs-specific options will be allowed. +// The sourcePath variable, if not empty, contains a bind mount source. +func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { + return processOptionsInternal(options, isTmpfs, sourcePath, getDefaultMountOptions) +} + +func processOptionsInternal(options []string, isTmpfs bool, sourcePath string, getDefaultMountOptions getDefaultMountOptionsFn) ([]string, error) { + var ( + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy, foundNoSwap, foundNoDereference bool + ) + + recursiveBind := true + + newOptions := make([]string, 0, len(options)) + for _, opt := range options { + // Some options have parameters - size, mode + key, _, _ := strings.Cut(opt, "=") + + // add advanced options such as upperdir=/path and workdir=/path, when overlay is specified + if foundOverlay { + if strings.Contains(opt, "upperdir") { + newOptions = append(newOptions, opt) + continue + } + if strings.Contains(opt, "workdir") { + newOptions = append(newOptions, opt) + continue + } + } + if strings.HasPrefix(key, "subpath") { + newOptions = append(newOptions, opt) + continue + } + if strings.HasPrefix(key, "idmap") { + if foundIdmap { + return nil, fmt.Errorf("the 'idmap' option can only be set once: %w", ErrDupeMntOption) + } + foundIdmap = true + newOptions = append(newOptions, opt) + continue + } + + switch key { + case "copy", "nocopy": + if foundCopy { + return nil, fmt.Errorf("only one of 'nocopy' and 'copy' can be used: %w", ErrDupeMntOption) + } + foundCopy = true + case "O": + foundOverlay = true + case "volume-opt": + // Volume-opt should be relayed and processed by driver. 
+ newOptions = append(newOptions, opt) + case "exec", "noexec": + if foundExec { + return nil, fmt.Errorf("only one of 'noexec' and 'exec' can be used: %w", ErrDupeMntOption) + } + foundExec = true + case "suid", "nosuid": + if foundSuid { + return nil, fmt.Errorf("only one of 'nosuid' and 'suid' can be used: %w", ErrDupeMntOption) + } + foundSuid = true + case "nodev", "dev": + if foundDev { + return nil, fmt.Errorf("only one of 'nodev' and 'dev' can be used: %w", ErrDupeMntOption) + } + foundDev = true + case "rw", "ro": + if foundWrite { + return nil, fmt.Errorf("only one of 'rw' and 'ro' can be used: %w", ErrDupeMntOption) + } + foundWrite = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared", "unbindable", "runbindable": + if foundProp { + return nil, fmt.Errorf("only one root propagation mode can be used: %w", ErrDupeMntOption) + } + foundProp = true + case "size": + if !isTmpfs { + return nil, fmt.Errorf("the 'size' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundSize { + return nil, fmt.Errorf("only one tmpfs size can be specified: %w", ErrDupeMntOption) + } + foundSize = true + case "mode": + if !isTmpfs { + return nil, fmt.Errorf("the 'mode' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundMode { + return nil, fmt.Errorf("only one tmpfs mode can be specified: %w", ErrDupeMntOption) + } + foundMode = true + case "tmpcopyup": + if !isTmpfs { + return nil, fmt.Errorf("the 'tmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundCopyUp { + return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) + } + foundCopyUp = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. 
+ continue + case "notmpcopyup": + if !isTmpfs { + return nil, fmt.Errorf("the 'notmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundCopyUp { + return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) + } + foundCopyUp = true + // do not propagate notmpcopyup to the OCI runtime + continue + case "noswap": + + if !isTmpfs { + return nil, fmt.Errorf("the 'noswap' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if rootless.IsRootless() { + return nil, fmt.Errorf("the 'noswap' option is only allowed with rootful tmpfs mounts: %w", ErrBadMntOption) + } + if foundNoSwap { + return nil, fmt.Errorf("the 'tmpswap' option can only be set once: %w", ErrDupeMntOption) + } + foundNoSwap = true + newOptions = append(newOptions, opt) + continue + case "no-dereference": + if foundNoDereference { + return nil, fmt.Errorf("the 'no-dereference' option can only be set once: %w", ErrDupeMntOption) + } + foundNoDereference = true + case define.TypeBind: + recursiveBind = false + fallthrough + case "rbind": + if isTmpfs { + return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundBind { + return nil, fmt.Errorf("only one of 'rbind' and 'bind' can be used: %w", ErrDupeMntOption) + } + foundBind = true + case "z", "Z": + if isTmpfs { + return nil, fmt.Errorf("the 'z' and 'Z' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if foundZ { + return nil, fmt.Errorf("only one of 'z' and 'Z' can be used: %w", ErrDupeMntOption) + } + foundZ = true + case "U": + if foundU { + return nil, fmt.Errorf("the 'U' option can only be set once: %w", ErrDupeMntOption) + } + foundU = true + default: + return nil, fmt.Errorf("unknown mount option %q: %w", opt, ErrBadMntOption) + } + newOptions = append(newOptions, opt) + } + + if !foundWrite { + newOptions = append(newOptions, "rw") + } + if !foundProp { + if recursiveBind { + newOptions = append(newOptions, "rprivate") + } else { + newOptions = append(newOptions, "private") + } + } + defaults, err := getDefaultMountOptions(sourcePath) + if err != nil { + return nil, err + } + if !foundExec && defaults.noexec { + newOptions = append(newOptions, "noexec") + } + if !foundSuid && defaults.nosuid { + newOptions = append(newOptions, "nosuid") + } + if !foundDev && defaults.nodev { + newOptions = append(newOptions, "nodev") + } + if isTmpfs && !foundCopyUp { + newOptions = append(newOptions, "tmpcopyup") + } + if !isTmpfs && !foundBind { + newOptions = append(newOptions, "rbind") + } + + return newOptions, nil +} + +func ParseDriverOpts(option string) (string, string, error) { + _, val, hasVal := strings.Cut(option, "=") + if !hasVal { + return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) + } + optKey, optVal, hasOptVal := strings.Cut(val, "=") + if !hasOptVal { + return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) + } + return optKey, optVal, nil +} diff --git a/pkg/util/mountOpts_linux.go b/pkg/util/mount_opts_linux.go similarity index 100% rename from pkg/util/mountOpts_linux.go rename to pkg/util/mount_opts_linux.go diff --git a/pkg/util/mountOpts_other.go b/pkg/util/mount_opts_other.go similarity index 100% rename from pkg/util/mountOpts_other.go rename to pkg/util/mount_opts_other.go diff --git a/pkg/util/utils.go b/pkg/util/utils.go index b93ec40c64..602d120549 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -25,7 +25,9 @@ import 
( "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/podman/v5/pkg/signal" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/unshare" stypes "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" ruser "github.com/moby/sys/user" @@ -96,14 +98,14 @@ func ParseDockerignore(containerfiles []string, root string) ([]string, string, if dockerIgnoreErr != nil { for _, containerfile := range containerfiles { containerfile = strings.TrimPrefix(containerfile, root) - if _, err := os.Stat(filepath.Join(root, containerfile+".containerignore")); err == nil { + if err := fileutils.Exists(filepath.Join(root, containerfile+".containerignore")); err == nil { path, symlinkErr = securejoin.SecureJoin(root, containerfile+".containerignore") if symlinkErr == nil { ignoreFile = path ignore, dockerIgnoreErr = os.ReadFile(path) } } - if _, err := os.Stat(filepath.Join(root, containerfile+".dockerignore")); err == nil { + if err := fileutils.Exists(filepath.Join(root, containerfile+".dockerignore")); err == nil { path, symlinkErr = securejoin.SecureJoin(root, containerfile+".dockerignore") if symlinkErr == nil { ignoreFile = path @@ -136,7 +138,10 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { username, password := parseCreds(creds) if username == "" { fmt.Print("Username: ") - fmt.Scanln(&username) + _, err := fmt.Scanln(&username) + if err != nil { + return nil, fmt.Errorf("could not read username: %w", err) + } } if password == "" { fmt.Print("Password: ") @@ -180,24 +185,53 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) { return sig, nil } -// GetKeepIDMapping returns the mappings and the user to use when keep-id is used -func GetKeepIDMapping(opts *namespaces.KeepIDUserNsOptions) (*stypes.IDMappingOptions, int, int, error) { +func getRootlessKeepIDMapping(uid, gid int, uids, gids []idtools.IDMap) (*stypes.IDMappingOptions, int, int, error) { options := stypes.IDMappingOptions{ HostUIDMapping: false, HostGIDMapping: false, } + maxUID, maxGID := 0, 0 + for _, u := range uids { + maxUID += u.Size + } + for _, g := range gids { + maxGID += g.Size + } + + options.UIDMap, options.GIDMap = nil, nil + + if len(uids) > 0 && uid != 0 { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + } + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) + if maxUID > uid { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) + } + + if len(gids) > 0 && gid != 0 { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + } + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) + if maxGID > gid { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) + } + + return &options, uid, gid, nil +} +// GetKeepIDMapping returns the mappings and the user to use when keep-id is used +func GetKeepIDMapping(opts *namespaces.KeepIDUserNsOptions) (*stypes.IDMappingOptions, int, int, error) { if !rootless.IsRootless() { - uids, err := rootless.ReadMappingsProc("/proc/self/uid_map") - if err != nil { - return nil, 0, 0, err + options := stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, } - gids, 
err := rootless.ReadMappingsProc("/proc/self/gid_map") + uids, gids, err := unshare.GetHostIDMappings("") if err != nil { return nil, 0, 0, err } - options.UIDMap = uids - options.GIDMap = gids + options.UIDMap = RuntimeSpecToIDtools(uids) + options.GIDMap = RuntimeSpecToIDtools(gids) uid, gid := 0, 0 if opts.UID != nil { @@ -224,33 +258,7 @@ func GetKeepIDMapping(opts *namespaces.KeepIDUserNsOptions) (*stypes.IDMappingOp return nil, -1, -1, fmt.Errorf("cannot read mappings: %w", err) } - maxUID, maxGID := 0, 0 - for _, u := range uids { - maxUID += u.Size - } - for _, g := range gids { - maxGID += g.Size - } - - options.UIDMap, options.GIDMap = nil, nil - - if len(uids) > 0 { - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) - } - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) - if maxUID > uid { - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) - } - - if len(gids) > 0 { - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) - } - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) - if maxGID > gid { - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) - } - - return &options, uid, gid, nil + return getRootlessKeepIDMapping(uid, gid, uids, gids) } // GetNoMapMapping returns the mappings and the user to use when nomap is used @@ -1136,7 +1144,7 @@ func ParseInputTime(inputTime string, since bool) (time.Time, error) { func OpenExclusiveFile(path string) (*os.File, error) { baseDir := filepath.Dir(path) if baseDir != "" { - if _, err := os.Stat(baseDir); err != nil { + if err := fileutils.Exists(baseDir); err != nil { return nil, err } } @@ -1332,10 +1340,10 @@ func ParseRestartPolicy(policy string) (string, uint, error) { return policyType, retriesUint, nil } -// ConvertTimeout converts negative timeout to MaxInt, which indicates approximately infinity, waiting to stop containers +// ConvertTimeout converts negative timeout to MaxUint32, which indicates approximately infinity, waiting to stop containers func ConvertTimeout(timeout int) uint { if timeout < 0 { - return math.MaxInt + return math.MaxUint32 } return uint(timeout) } diff --git a/pkg/util/utils_linux.go b/pkg/util/utils_linux.go index eadc8d3ba8..85d819b9b2 100644 --- a/pkg/util/utils_linux.go +++ b/pkg/util/utils_linux.go @@ -37,7 +37,9 @@ func FindDeviceNodes() (map[string]string, error) { nodes := make(map[string]string) err := filepath.WalkDir("/dev", func(path string, d fs.DirEntry, err error) error { if err != nil { - logrus.Warnf("Error descending into path %s: %v", path, err) + if !errors.Is(err, fs.ErrNotExist) { + logrus.Warnf("Error descending into path %s: %v", path, err) + } return filepath.SkipDir } @@ -104,7 +106,6 @@ func AddPrivilegedDevices(g *generate.Generator, systemdMode bool) error { if err != nil { return err } - g.ClearLinuxDevices() if rootless.IsRootless() { mounts := make(map[string]interface{}) diff --git a/pkg/util/utils_supported.go b/pkg/util/utils_supported.go index 024c93dace..28c52f2936 100644 --- a/pkg/util/utils_supported.go +++ b/pkg/util/utils_supported.go @@ -28,7 +28,7 @@ func GetRootlessConfigHomeDir() (string, error) { // GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for // the pause process. 
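// Worked example for the getRootlessKeepIDMapping helper factored out above,
// assuming a typical rootless setup: uid 1000 with a 65536-wide range in
// /etc/subuid, i.e. uids = [{ContainerID: 0, HostID: 100000, Size: 65536}].
// The resulting mapping is
//
//	UIDMap = {ContainerID: 0,    HostID: 1,    Size: 1000}   // container ids 0-999 -> parent ids 1-1000
//	         {ContainerID: 1000, HostID: 0,    Size: 1}      // container id 1000   -> parent id 0, the invoking user's own id (the keep-id guarantee)
//	         {ContainerID: 1001, HostID: 1001, Size: 64536}  // container ids 1001+ -> the same parent ids
//
// The new uid != 0 / gid != 0 guards avoid emitting a zero-sized first range
// when keep-id targets uid or gid 0; TestGetRootlessKeepIDMapping below pins
// both behaviours.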
func GetRootlessPauseProcessPidPath() (string, error) { - runtimeDir, err := GetRootlessRuntimeDir() + runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", err } diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index d680cfdc86..dd53ac1c36 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -2,10 +2,13 @@ package util import ( "fmt" + "math" + "sort" "testing" "time" "github.com/containers/storage/pkg/idtools" + stypes "github.com/containers/storage/types" ruser "github.com/moby/sys/user" "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" @@ -573,3 +576,197 @@ func TestConvertMappings(t *testing.T) { assert.Equal(t, start[i].Size, convertedBack[i].Size) } } + +func TestConvertTimeout(t *testing.T) { + timeout := ConvertTimeout(0) + assert.Equal(t, uint(0), timeout) + + timeout = ConvertTimeout(100) + assert.Equal(t, uint(100), timeout) + + timeout = ConvertTimeout(-1) + assert.Equal(t, uint(math.MaxUint32), timeout) + + timeout = ConvertTimeout(-100) + assert.Equal(t, uint(math.MaxUint32), timeout) +} + +func TestGetRootlessKeepIDMapping(t *testing.T) { + tests := []struct { + uid, gid int + uids, gids []idtools.IDMap + expectedOptions *stypes.IDMappingOptions + expectedUID, expectedGID int + expectedError error + }{ + { + uid: 1000, + gid: 1000, + uids: []idtools.IDMap{}, + gids: []idtools.IDMap{}, + expectedOptions: &stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, + UIDMap: []idtools.IDMap{{ContainerID: 1000, HostID: 0, Size: 1}}, + GIDMap: []idtools.IDMap{{ContainerID: 1000, HostID: 0, Size: 1}}, + }, + expectedUID: 1000, + expectedGID: 1000, + }, + { + uid: 0, + gid: 0, + uids: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}, + gids: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}, + expectedOptions: &stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, + UIDMap: []idtools.IDMap{{ContainerID: 0, HostID: 0, Size: 1}, {ContainerID: 1, HostID: 1, Size: 65536}}, + GIDMap: []idtools.IDMap{{ContainerID: 0, HostID: 0, Size: 1}, {ContainerID: 1, HostID: 1, Size: 65536}}, + }, + expectedUID: 0, + expectedGID: 0, + }, + } + + for _, test := range tests { + options, uid, gid, err := getRootlessKeepIDMapping(test.uid, test.gid, test.uids, test.gids) + assert.Nil(t, err) + assert.Equal(t, test.expectedOptions, options) + assert.Equal(t, test.expectedUID, uid) + assert.Equal(t, test.expectedGID, gid) + } +} + +func getDefaultMountOptionsNoStat(path string) (defaultMountOptions, error) { + return defaultMountOptions{false, true, true}, nil +} + +func TestProcessOptions(t *testing.T) { + tests := []struct { + name string + options []string + isTmpfs bool + sourcePath string + expected []string + expectErr bool + }{ + { + name: "tmpfs", + options: []string{"rw", "size=512m"}, + isTmpfs: true, + sourcePath: "", + expected: []string{"nodev", "nosuid", "rprivate", "rw", "size=512m", "tmpcopyup"}, + }, + { + name: "duplicate idmap option", + sourcePath: "/path/to/source", + options: []string{"idmap", "idmap"}, + expectErr: true, + }, + { + name: "mode allowed only with tmpfs", + sourcePath: "/path/to/source", + options: []string{"rw", "rbind", "mode=0123"}, + expectErr: true, + }, + { + name: "noswap allowed only with tmpfs", + sourcePath: "/path/to/source", + options: []string{"noswap"}, + expectErr: true, + }, + { + name: "tmpcopyup allowed only with tmpfs", + sourcePath: "/path/to/source", + options: []string{"tmpcopyup"}, + expectErr: true, + }, 
+ { + name: "notmpcopyup allowed only with tmpfs", + sourcePath: "/path/to/source", + options: []string{"notmpcopyup"}, + expectErr: true, + }, + { + name: "z not allowed with tmpfs", + isTmpfs: true, + sourcePath: "/path/to/source", + options: []string{"z"}, + expectErr: true, + }, + { + name: "size allowed only with tmpfs", + sourcePath: "/path/to/source", + options: []string{"size=123456"}, + expectErr: true, + }, + { + name: "conflicting option dev/nodev", + sourcePath: "/path/to/source", + options: []string{"dev", "nodev"}, + expectErr: true, + }, + { + name: "conflicting option suid/nosuid", + sourcePath: "/path/to/source", + options: []string{"suid", "nosuid"}, + expectErr: true, + }, + { + name: "conflicting option exec/noexec", + sourcePath: "/path/to/source", + options: []string{"noexec", "exec"}, + expectErr: true, + }, + { + name: "conflicting option ro/rw", + sourcePath: "/path/to/source", + options: []string{"ro", "rw"}, + expectErr: true, + }, + { + name: "conflicting option bind/rbind", + sourcePath: "/path/to/source", + options: []string{"bind", "rbind"}, + expectErr: true, + }, + { + name: "conflicting option bind/rbind", + sourcePath: "/path/to/source", + options: []string{"bind", "rbind"}, + expectErr: true, + }, + { + name: "default bind mount", + sourcePath: "/path/to/source", + expected: []string{"nodev", "nosuid", "rbind", "rprivate", "rw"}, + }, + { + name: "default bind mount with bind", + sourcePath: "/path/to/source", + options: []string{"bind"}, + expected: []string{"nodev", "nosuid", "bind", "private", "rw"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + opts, err := processOptionsInternal(tt.options, tt.isTmpfs, tt.sourcePath, getDefaultMountOptionsNoStat) + if tt.expectErr { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + sort.Strings(opts) + sort.Strings(tt.expected) + assert.Equal(t, opts, tt.expected) + } + }) + } +} + +func TestGetRootlessPauseProcessPidPath(t *testing.T) { + dir, err := GetRootlessPauseProcessPidPath() + assert.NoError(t, err) + assert.NotEqual(t, dir, "libpod/tmp/pause.pid") +} diff --git a/rootless.md b/rootless.md index 9574130e4b..09e3d2e35a 100644 --- a/rootless.md +++ b/rootless.md @@ -8,6 +8,8 @@ Contributors are more than welcomed to help with this work. If you decide to ca * The kernel does not allow processes without CAP_NET_BIND_SERVICE to bind to low ports. * You can modify the `net.ipv4.ip_unprivileged_port_start` sysctl to change the lowest port. For example `sysctl net.ipv4.ip_unprivileged_port_start=443` allows rootless Podman containers to bind to ports >= 443. * A proxy server, kernel firewall rule, or redirection tool such as [redir](https://github.com/troglobit/redir) may be used to redirect traffic from a privileged port to an unprivileged one (where a podman pod is bound) in a server scenario - where a user has access to the root account (or setuid on the binary would be an acceptable risk), but wants to run the containers as an unprivileged user for enhanced security and for a limited number of pre-known ports. +* As of Podman 5.0, pasta is the default networking tool. Since pasta copies the IP address of the main interface, connections to that IP from containers do not work. This means that unless you have more than one interface, inter-container connections cannot be made without explicitly passing a pasta network configuration, either in `containers.conf` or at runtime. + * If you previously had port forwards (ex. 
via -p 80:80) that other containers could access, you can either revert back to slirp4netns or use the solution (setting pasta options with 10.0.2.x IPs) posted [here](https://blog.podman.io/2024/03/podman-5-0-breaking-changes-in-detail/). * “How To” documentation is patchy at best. * If /etc/subuid and /etc/subgid are not set up for a user, then podman commands can easily fail diff --git a/rpm/podman.spec b/rpm/podman.spec index 62ca4dce9f..1de5c74b4b 100644 --- a/rpm/podman.spec +++ b/rpm/podman.spec @@ -11,13 +11,8 @@ # set it separately here and do not depend on RHEL's go-[s]rpm-macros package # until that's fixed. # c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328 -# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331 %if %{defined rhel} && 0%{?rhel} < 10 %define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**}; -# python3 dep conditional for rhel8 -%if %{?rhel} == 8 -%define rhel8py3 1 -%endif %endif %global gomodulesmode GO111MODULE=on @@ -81,7 +76,7 @@ BuildRequires: glibc-devel BuildRequires: glibc-static BuildRequires: golang BuildRequires: git-core -%if !%{defined gobuild} +%if %{undefined rhel} || 0%{?rhel} >= 10 BuildRequires: go-rpm-macros %endif BuildRequires: gpgme-devel @@ -96,18 +91,16 @@ BuildRequires: man-db BuildRequires: ostree-devel BuildRequires: systemd BuildRequires: systemd-devel -%if %{defined rhel8py3} -BuildRequires: python3 -%endif Requires: catatonit Requires: conmon >= 2:2.1.7-2 -Requires: containers-common-extra -%if %{defined rhel} && !%{defined eln} -Recommends: gvisor-tap-vsock-gvforwarder +%if %{defined fedora} && 0%{?fedora} >= 40 +# TODO: Remove the f40 conditional after a few releases to keep conditionals to +# a minimum +# Ref: https://bugzilla.redhat.com/show_bug.cgi?id=2269148 +Requires: containers-common-extra >= 5:0.58.0-1 %else -Requires: gvisor-tap-vsock-gvforwarder +Requires: containers-common-extra %endif -Recommends: gvisor-tap-vsock Provides: %{name}-quadlet Obsoletes: %{name}-quadlet <= 5:4.4.0-1 Provides: %{name}-quadlet = %{epoch}:%{version}-%{release} @@ -124,8 +117,6 @@ additional privileges. Both tools share image (not container) storage, hence each can use or manipulate images (but not containers) created by the other. -%{summary} -%{repo} Simple management tool for pods, containers and images %package docker Summary: Emulate Docker CLI using %{name} @@ -146,7 +137,9 @@ pages and %{name}. Summary: Tests for %{name} Requires: %{name} = %{epoch}:%{version}-%{release} +%if %{defined fedora} Requires: bats +%endif Requires: jq Requires: skopeo Requires: nmap-ncat @@ -187,6 +180,17 @@ capabilities specified in user quadlets. It is a symlink to %{_bindir}/%{name} and execs into the `%{name}sh` container when `%{_bindir}/%{name}sh` is set as a login shell or set as os.Args[0]. 
+%package machine +Summary: Metapackage for setting up %{name} machine +Requires: %{name} = %{epoch}:%{version}-%{release} +Requires: gvisor-tap-vsock +Requires: qemu +Requires: virtiofsd + +%description machine +This subpackage installs the dependencies for %{name} machine, for more see: +https://docs.podman.io/en/latest/markdown/podman-machine.1.html + %prep %autosetup -Sgit -n %{name}-%{version_no_tilde} sed -i 's;@@PODMAN@@\;$(BINDIR);@@PODMAN@@\;%{_bindir};' Makefile @@ -219,7 +223,7 @@ export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full" export GOPROXY=direct -LDFLAGS="-X %{ld_libpod}/define.buildInfo=$(date +%s) \ +LDFLAGS="-X %{ld_libpod}/define.buildInfo=${SOURCE_DATE_EPOCH:-$(date +%s)} \ -X %{ld_libpod}/config._installPrefix=%{_prefix} \ -X %{ld_libpod}/config._etcDir=%{_sysconfdir} \ -X %{ld_project}/pkg/systemd/quadlet._binDir=%{_bindir}" @@ -241,6 +245,10 @@ export BUILDTAGS="$BASEBUILDTAGS exclude_graphdriver_btrfs btrfs_noversion remot export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh) $(hack/btrfs_tag.sh)" %gobuild -o bin/quadlet ./cmd/quadlet +# build %%{name}-testing +export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh) $(hack/btrfs_tag.sh)" +%gobuild -o bin/podman-testing ./cmd/podman-testing + # reset LDFLAGS for plugins binaries LDFLAGS='' @@ -256,27 +264,31 @@ PODMAN_VERSION=%{version} %{__make} DESTDIR=%{buildroot} PREFIX=%{_prefix} ETCDI install.docker \ install.docker-docs \ install.remote \ + install.testing \ %if %{defined _modulesloaddir} - install.modules-load + install.modules-load %endif sed -i 's;%{buildroot};;g' %{buildroot}%{_bindir}/docker # do not include docker and podman-remote man pages in main package -for file in `find %{buildroot}%{_mandir}/man[15] -type f | sed "s,%{buildroot},," | grep -v -e remote -e docker`; do - echo "$file*" >> podman.file-list +for file in `find %{buildroot}%{_mandir}/man[15] -type f | sed "s,%{buildroot},," | grep -v -e %{name}sh.1 -e remote -e docker`; do + echo "$file*" >> %{name}.file-list done rm -f %{buildroot}%{_mandir}/man5/docker*.5 -install -d -p %{buildroot}/%{_datadir}/%{name}/test/system -cp -pav test/system %{buildroot}/%{_datadir}/%{name}/test/ +install -d -p %{buildroot}%{_datadir}/%{name}/test/system +cp -pav test/system %{buildroot}%{_datadir}/%{name}/test/ + +# symlink virtiofsd in %%{name} libexecdir for machine subpackage +ln -s ../virtiofsd %{buildroot}%{_libexecdir}/%{name} #define license tag if not already defined %{!?_licensedir:%global license %doc} %files -f %{name}.file-list -%license LICENSE +%license LICENSE vendor/modules.txt %doc README.md CONTRIBUTING.md install.md transfer.md %{_bindir}/%{name} %dir %{_libexecdir}/%{name} @@ -315,15 +327,16 @@ cp -pav test/system %{buildroot}/%{_datadir}/%{name}/test/ %{_datadir}/zsh/site-functions/_%{name}-remote %files tests +%{_bindir}/%{name}-testing %{_datadir}/%{name}/test %files -n %{name}sh %{_bindir}/%{name}sh +%{_mandir}/man1/%{name}sh.1* + +%files machine +%dir %{_libexecdir}/%{name} +%{_libexecdir}/%{name}/virtiofsd %changelog -%if %{defined autochangelog} %autochangelog -%else -* Mon May 01 2023 RH Container Bot -- Placeholder changelog for envs that are not autochangelog-ready -%endif diff --git a/test/README.md b/test/README.md index 98b447e968..f374d3e7fb 100644 --- a/test/README.md +++ b/test/README.md @@ -110,7 +110,7 @@ file itself. 
Consider the following actual test: It("podman inspect bogus pod", func() { session := podmanTest.Podman([]string{"pod", "inspect", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "no such pod foobar")) }) ``` @@ -174,7 +174,3 @@ For usage run: ``` hack/bats --help ``` - -## Contributing to system tests - -Please see [the TODO list of needed workflows/tests](system/TODO.md). diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at index 06bee6cffb..8cd7ed2234 100644 --- a/test/apiv2/10-images.at +++ b/test/apiv2/10-images.at @@ -274,6 +274,12 @@ else _show_ok 1 "compat quiet build" fi +# Do not try a real build here to test the comma-separated syntax; emulation +# is slow and may not work everywhere, and checking the error is enough to know +# the server parsed it correctly +t POST "/build?q=1&dockerfile=containerfile&platform=linux/amd64,test" $CONTAINERFILE_WITH_ERR_TAR 400 \ + .message="failed to parse query parameter 'platform': \"test\": invalid platform syntax for --platform=\"test\": \"test\": unknown operating system or architecture: invalid argument" + cleanBuildTest # compat API vs libpod API event differences: @@ -343,4 +349,14 @@ t GET libpod/images/no-alias-for-sure/resolve 200 \ t GET libpod/images/noCAPITALcharAllowed/resolve 400 \ .cause="repository name must be lowercase" + +START=$(date +%s.%N) +# test pull-error API response +podman pull --retry 0 localhost:5000/idonotexist || true +t GET "libpod/events?stream=false&since=$START" 200 \ + .status=pull-error \ + .Action=pull-error \ + .Actor.Attributes.name="localhost:5000/idonotexist" \ + .Actor.Attributes.error~".*connection refused" + # vim: filetype=sh diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at index 584d7c3958..5f54969f7d 100644 --- a/test/apiv2/12-imagesMore.at +++ b/test/apiv2/12-imagesMore.at @@ -90,3 +90,41 @@ t DELETE libpod/images/$IMAGE 200 \ podman system connection rm $conn stop_registry + +# if an image is a manifest list, it should not have +# anything for arch or os +podman manifest create foobar +t GET libpod/images/json 200 \ .[0].IsManifestList=true \ + .[0].Arch=null \ + .[0].Os=null + + +# list images through the libpod endpoint should return +# IsManifestList (bool), Arch (string), and Os (string) +podman pull -q $IMAGE +t GET libpod/images/json 200 \ .[0].IsManifestList=true \ + .[0].Arch=null \ + .[0].Os=null \ + '.[0].RepoDigests | length=1' \ + .[1].IsManifestList=false \ + .[1].Arch=amd64 \ + .[1].Os=linux + +# if both a manifest list and an image are returned by the libpod images +# endpoint, the manifest list entry should report IsManifestList=true with null +# Arch and Os, and the image entry should report IsManifestList=false with Arch and Os set.
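The comment above describes the expected shape of the libpod image-list response: a manifest list entry reports IsManifestList=true with null Arch and Os, while a regular image reports IsManifestList=false with both fields set. A rough way to eyeball the same fields outside the test harness (the rootless socket path below is an assumption, and a podman API service must already be listening on it):

```
# Illustrative only: print the fields the assertions below check.
sock="$XDG_RUNTIME_DIR/podman/podman.sock"
curl -s --unix-socket "$sock" http://d/v5.0.0/libpod/images/json |
    jq '.[] | {IsManifestList, Arch, Os}'
```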
+podman manifest add --arch amd64 foobar $IMAGE +t GET libpod/images/json 200 .[0].IsManifestList=true\ + .[0].Arch=null \ + .[0].Os=null \ + '.[0].RepoDigests | length=2' \ + .[1].IsManifestList=false \ + .[1].Arch=amd64 \ + .[1].Os=linux + +t GET images/json 200 .[0].IsManifestList=null \ + .[0].Arch=null \ + .[0].Os=null \ + .[1].IsManifestList=null \ + .[1].Arch=null \ + .[1].Os=null \ diff --git a/test/apiv2/15-manifest.at b/test/apiv2/15-manifest.at index 8b43740e5a..44f3c7c83e 100644 --- a/test/apiv2/15-manifest.at +++ b/test/apiv2/15-manifest.at @@ -3,6 +3,7 @@ # Tests for manifest list endpoints start_registry +export REGISTRY_PORT # Creates the manifest list t POST /v3.4.0/libpod/manifests/create?name=abc 200 \ @@ -28,7 +29,7 @@ RUN >file2 EOF ) -# manifest add --anotation tests +# manifest add --annotation tests t POST /v3.4.0/libpod/manifests/$id_abc/add images="[\"containers-storage:$id_abc_image\"]" 200 t PUT /v4.0.0/libpod/manifests/$id_xyz operation='update' images="[\"containers-storage:$id_xyz_image\"]" annotations="{\"foo\":\"bar\"}" annotation="[\"hoge=fuga\"]" 400 \ .cause='can not set both Annotation and Annotations' @@ -66,5 +67,130 @@ t POST "/v4.0.0/libpod/manifests/xyz:latest/registry/localhost:$REGISTRY_PORT%2F t DELETE /v4.0.0/libpod/manifests/$id_abc 200 t DELETE /v4.0.0/libpod/manifests/$id_xyz 200 +# manifest add --artifact tests +truncate -s 20M $WORKDIR/zeroes +function test_artifacts_with_args() { + # these values, ideally, are local to our caller + local args="$artifact_annotations $artifact_config $artifact_config_type $artifact_exclude_titles $artifact_layer_type $artifact_type" + t POST /v5.0.0/libpod/manifests/artifacts 201 + local id_artifacts=$(jq -r '.Id' <<<"$output") + t PUT /v5.0.0/libpod/manifests/$id_artifacts operation='update' $args --form=listed.txt="oh yeah" --form=zeroes=@"$WORKDIR/zeroes" 200 + t POST "/v5.0.0/libpod/manifests/artifacts:latest/registry/localhost:$REGISTRY_PORT%2Fartifacts:latest?tlsVerify=false&all=true" 200 + + local index=$(skopeo inspect --raw --tls-verify=false docker://localhost:$REGISTRY_PORT/artifacts:latest) + # jq <<<"$index" + local digest=$(jq -r '.manifests[0].digest' <<<"$index") + local artifact=$(skopeo inspect --raw --tls-verify=false docker://localhost:$REGISTRY_PORT/artifacts@${digest}) + # jq <<<"$artifact" + + local expect_type + case ${artifact_type} in + artifact_type=*) + expect_type=${artifact_type#artifact_type=} + expect_type=${expect_type:-null};; + *) + expect_type=application/vnd.unknown.artifact.v1;; + esac + is $(jq -r '.artifactType'<<<"$artifact") "${expect_type}" "artifactType in artifact manifest with artifact_type arg \"${artifact_type}\"" + is $(jq -r '.manifests[0].artifactType'<<<"$index") "${expect_type}" "artifactType in image index with artifact_type arg \"${artifact_type}\"" + + local expect_annotations + case ${artifact_annotations} in + artifact_annotations=*) + expect_annotations=$(jq -r '.foo' <<<"${artifact_annotations#artifact_annotations=}");; + *) + expect_annotations=null;; + esac + is $(jq -r '.annotations["foo"]'<<<"$artifact") "$expect_annotations" "\"foo\" annotation in artifact manifest with artifact_annotations arg \"${artifact_annotations}\"" + + local expect_config_size + case ${artifact_config} in + artifact_config=*) + expect_config_size=$(wc -c <<<"${artifact_config#artifact_config=}") + expect_config_size=$((expect_config_size-1)) + if [[ $expect_config_size -eq 0 ]]; then + expect_config_size=2 + fi ;; + *) + expect_config_size=2;; + esac + is $(jq -r 
'.config.size'<<<"$artifact") "$expect_config_size" "size of config blob in artifact manifest with artifact_config arg \"${artifact_config}\"" + + local expect_config_type + case ${artifact_config_type} in + artifact_config_type=*) + expect_config_type=${artifact_config_type#artifact_config_type=} + if [[ -z "$expect_config_type" ]] ; then + if [[ -n "${artifact_config#artifact_config=}" ]] ; then + expect_config_type=application/vnd.oci.image.config.v1+json + else + expect_config_type=application/vnd.oci.empty.v1+json + fi + fi;; + *) + if [[ -n "${artifact_config#artifact_config=}" ]] ; then + expect_config_type=application/vnd.oci.image.config.v1+json + else + expect_config_type=application/vnd.oci.empty.v1+json + fi;; + esac + is $(jq -r '.config.mediaType'<<<"$artifact") "$expect_config_type" "mediaType of config blob in artifact manifest with artifact_config_type arg \"${artifact_config_type}\" and artifact_config arg \"${artifact_config}\"" + + local expect_first_layer_type expect_second_layer_type + case ${artifact_layer_type} in + artifact_layer_type=*) + expect_first_layer_type=${artifact_layer_type#artifact_layer_type=} + expect_first_layer_type=${expect_first_layer_type:-text/plain} + expect_second_layer_type=${artifact_layer_type#artifact_layer_type=} + expect_second_layer_type=${expect_second_layer_type:-application/octet-stream};; + *) + expect_first_layer_type=text/plain; + expect_second_layer_type=application/octet-stream;; + esac + is $(jq -r '.layers[0].mediaType'<<<"$artifact") "$expect_first_layer_type" "mediaType of listed.txt layer in artifact manifest with artifact_layer_type arg \"${artifact_layer_type}\"" + is $(jq -r '.layers[1].mediaType'<<<"$artifact") "$expect_second_layer_type" "mediaType of zeroes layer in artifact manifest with artifact_layer_type arg \"${artifact_layer_type}\"" + + local expect_first_title expect_second_title + case ${artifact_exclude_titles} in + artifact_exclude_titles=true) + expect_first_title=null; + expect_second_title=null;; + *) + expect_first_title=listed.txt; + expect_second_title=zeroes;; + esac + is $(jq -r '.layers[0].annotations["org.opencontainers.image.title"]'<<<"$artifact") "$expect_first_title" "org.opencontainers.image.title annotation on listed.txt layer in artifact manifest with artifact_exclude_titles arg \"${artifact_exclude_titles}\"" + is $(jq -r '.layers[1].annotations["org.opencontainers.image.title"]'<<<"$artifact") "$expect_second_title" "org.opencontainers.image.title annotation on zeroes layer in artifact manifest with artifact_exclude_titles arg \"${artifact_exclude_titles}\"" + + t DELETE /v5.0.0/libpod/manifests/$id_artifacts 200 +} + +function test_artifacts() { + local artifact_annotations + local artifact_config + local artifact_config_type + local artifact_exclude_titles + local artifact_layer_type + local artifact_type + for artifact_annotations in "" artifact_annotations='{"foo":"bar"}' ; do + test_artifacts_with_args + done + for artifact_config in "" artifact_config= artifact_config="{}"; do + for artifact_config_type in "" artifact_config_type= artifact_config_type=text/plain ; do + test_artifacts_with_args + done + done + for artifact_exclude_titles in "" artifact_exclude_titles=true ; do + test_artifacts_with_args + done + for artifact_layer_type in "" artifact_layer_type= artifact_layer_type=text/plain artifact_layer_type=application/octet-stream ; do + test_artifacts_with_args + done + for artifact_type in "" artifact_type= artifact_type=text/plain artifact_type=application/octet-stream ; do 
+ test_artifacts_with_args + done +} +test_artifacts + podman rmi -a stop_registry diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at index 009d12fa25..d73187650b 100644 --- a/test/apiv2/20-containers.at +++ b/test/apiv2/20-containers.at @@ -45,6 +45,11 @@ t POST "/v4.7.0/libpod/containers/foo/attach?logs=true&stream=false" 200 response_headers=$(cat "$WORKDIR/curl.headers.out") like "$response_headers" ".*Content-Type: application/vnd\.docker\.multiplexed-stream.*" "vnd.docker.multiplexed-stream libpod v4.7.0" +t POST "containers/foo/attach?logs=true&stream=false" 101 +response_headers=$(cat "$WORKDIR/curl.headers.out") +like "$response_headers" ".*Content-Type: application/vnd\.docker\.raw-stream.*" "hijacked connection header: Content-type: application/vnd.docker.raw-stream" +like "$response_headers" ".*Upgrade: tcp.*" "hijacked connection header: Upgrade: tcp" + t POST "containers/foo/kill" 204 podman run --replace --name=foo -v /tmp:/tmp $IMAGE true @@ -69,7 +74,7 @@ t GET libpod/containers/json?all=true 200 \ .[0].IsInfra=false # Test compat API for Network Settings (.Network is N/A when rootless) -network_expect="Networks.slirp4netns.NetworkID=slirp4netns" +network_expect="Networks.pasta.NetworkID=pasta" if root; then network_expect="Networks.podman.NetworkID=podman" fi @@ -107,21 +112,8 @@ t GET libpod/containers/json?last=1 200 \ cid=$(jq -r '.[0].Id' <<<"$output") -if root; then - t GET libpod/containers/stats?containers='[$cid]' 200 -else - if have_cgroupsv2; then - t GET libpod/containers/stats?containers='[$cid]' 200 - else - t GET libpod/containers/stats?containers='[$cid]' 409 - fi -fi - -# max_usage is not set for cgroupv2 -if have_cgroupsv2; then - t GET libpod/containers/stats?containers='[$cid]' 200 \ - .memory_stats.max_usage=null -fi +t GET "libpod/containers/stats?containers=$cid&stream=false" 200 \ + .memory_stats.max_usage=null t DELETE libpod/containers/$cid 200 .[0].Id=$cid @@ -236,20 +228,13 @@ t GET libpod/images/newrepo:v1/json 200 \ cparam="repo=newrepo&tag=v2&comment=bar&author=eric" cparam="$cparam&format=docker&changes=CMD=/bin/foo" -if root || have_cgroupsv2; then - t POST "libpod/commit?container=${cid:0:12}&$cparam&pause=true" 200 - t GET libpod/images/newrepo:v2/json 200 \ - .RepoTags[0]=localhost/newrepo:v2 \ - .Author=eric \ - .Comment=bar \ - .Config.Cmd[-1]="/bin/foo" - t DELETE images/localhost/newrepo:v2?force=true 200 -else - # cgroupsv1 rootless : pause is not supported in cgroups v1 rootless - t POST "libpod/commit?container=${cid:0:12}&$cparam&pause=true" 500 \ - .cause="this container does not have a cgroup" \ - .message~".*pause containers on rootless containers with cgroup V1" -fi +t POST "libpod/commit?container=${cid:0:12}&$cparam&pause=true" 200 +t GET libpod/images/newrepo:v2/json 200 \ + .RepoTags[0]=localhost/newrepo:v2 \ + .Author=eric \ + .Comment=bar \ + .Config.Cmd[-1]="/bin/foo" +t DELETE images/localhost/newrepo:v2?force=true 200 # Create a container for testing the container initializing later podman create -t -i --name myctr $IMAGE ls @@ -479,24 +464,8 @@ t POST containers/prune?filters='{"network":["anynetwork"]}' 500 \ # Test CPU limit (NanoCPUs) nanoCpu=500000 -if have_cgroupsv2; then - t POST containers/create Image=$IMAGE HostConfig='{"NanoCpus":500000}' 201 \ - .Id~[0-9a-f]\\{64\\} -else - if root; then - # cgroupsv1 rootful : NanoCpus needs to set more than 10000000 - t POST containers/create Image=$IMAGE HostConfig='{"NanoCpus":500000}' 500 \ - .cause="CPU cfs quota cannot be less than 1ms (i.e. 
1000)" - t POST containers/create Image=$IMAGE HostConfig='{"NanoCpus":10000000}' 201 \ - .Id~[0-9a-f]\\{64\\} - nanoCpu=10000000 - else - # cgroupsv1 rootless : Resource limits that include NanoCPUs are not supported and ignored - t POST containers/create Image=$IMAGE HostConfig='{"NanoCpus":500000}' 201 \ - .Id~[0-9a-f]\\{64\\} - nanoCpu=0 - fi -fi +t POST containers/create Image=$IMAGE HostConfig='{"NanoCpus":500000}' 201 \ + .Id~[0-9a-f]\\{64\\} cid=$(jq -r '.Id' <<<"$output") t GET containers/$cid/json 200 \ @@ -527,6 +496,29 @@ t GET containers/$cid/json 200 \ t DELETE containers/$cid?v=true 204 +# test create container like Docker >= 25 cli: NetworkMode="default" but EndpointsConfig struct is explicitly set and netns="host" +t POST containers/create \ + Image=$IMAGE \ + HostConfig='{"NetworkMode":"default"}' \ + NetworkingConfig='{"EndpointsConfig":{"default":{"IPAMConfig":null,"Links":null,"Aliases":null,"MacAddress":"","NetworkID":"","EndpointID":"","Gateway":"","IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"DriverOpts":null,"DNSNames":null}}}' \ + 201 \ + .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") +t GET containers/$cid/json 200 \ + .HostConfig.NetworkMode="host" + +t DELETE containers/$cid?v=true 204 + +# test creating a container fails with netns="hosts" on podman side but keep using the default network mode +# on docker CLI side and trying to use --ip 1.2.3.4 which is only valid for the bridge network mode (docker CLI +# will assume the default is the bridge mode, so it's valid from docker CLI point of view). +t POST containers/create \ + Image=$IMAGE \ + HostConfig='{"NetworkMode":"default"}' \ + NetworkingConfig='{"EndpointsConfig":{"default":{"IPAMConfig":null,"Links":null,"Aliases":null,"MacAddress":"","NetworkID":"","EndpointID":"","Gateway":"","IPAddress":"1.2.3.4","IPPrefixLen":0,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"DriverOpts":null,"DNSNames":null}}}' \ + 500 \ + .cause="networks and static ip/mac address can only be used with Bridge mode networking" + # Restart with the default containers.conf for next tests. stop_service start_service @@ -649,14 +641,11 @@ t GET containers/status-test/json 200 .State.Status="created" podman start status-test t GET containers/status-test/json 200 .State.Status="running" -# cgroupsv1 rootless : pause and unpause are not supported in cgroups v1 rootless -if root || have_cgroupsv2; then - podman pause status-test - t GET containers/status-test/json 200 .State.Status="paused" +podman pause status-test +t GET containers/status-test/json 200 .State.Status="paused" - podman unpause status-test - t GET containers/status-test/json 200 .State.Status="running" -fi +podman unpause status-test +t GET containers/status-test/json 200 .State.Status="running" podman stop status-test & sleep 1 @@ -690,11 +679,6 @@ if root; then cgroupPath=/sys/fs/cgroup/cpu.weight # 002 is the byte length cpu_weight_expect=$'\001\0025' - if ! 
have_cgroupsv2; then - cgroupPath=/sys/fs/cgroup/cpu/cpu.shares - # 004 is the byte length - cpu_weight_expect=$'\001\004123' - fi # Verify echo '{ "AttachStdout":true,"Cmd":["cat", "'$cgroupPath'"]}' >${TMPD}/exec.json @@ -702,6 +686,12 @@ if root; then eid=$(jq -r '.Id' <<<"$output") t POST exec/$eid/start 200 $cpu_weight_expect + # Now use the compat API + echo '{ "Memory": 536870912 }' >${TMPD}/compatupdate.json + t POST containers/updateCtr/update ${TMPD}/compatupdate.json 200 + t GET libpod/containers/updateCtr/json 200 \ + .HostConfig.Memory=536870912 + podman rm -f updateCtr fi diff --git a/test/apiv2/25-containersMore.at b/test/apiv2/25-containersMore.at index d9c9fa0160..802997377d 100644 --- a/test/apiv2/25-containersMore.at +++ b/test/apiv2/25-containersMore.at @@ -8,7 +8,7 @@ podman pull $IMAGE &>/dev/null # Ensure clean slate podman rm -a -f &>/dev/null -podman run -d --name foo $IMAGE top +podman run -d --name foo --entrypoint='["sh","-c"]' $IMAGE top # Check exists for none such t GET libpod/containers/nonesuch/exists 404 @@ -44,7 +44,15 @@ t GET libpod/containers/foo/json 200 \ .State.Status=running \ .ImageName=$IMAGE \ .Config.Cmd[0]=top \ - .Name=foo + .Name=foo \ + .Config.StopSignal="SIGTERM" \ + .Config.Entrypoint[0]="sh" \ + .Config.Entrypoint[1]="-c" + +# now check v4 request return old compatible output +t GET /v4.0.0/libpod/containers/foo/json 200 \ + .Config.StopSignal=15 \ + .Config.Entrypoint="sh -c" # List processes of the container t GET libpod/containers/foo/top 200 \ diff --git a/test/apiv2/26-containersWait.at b/test/apiv2/26-containersWait.at index 3bd4165d06..81ba304a63 100644 --- a/test/apiv2/26-containersWait.at +++ b/test/apiv2/26-containersWait.at @@ -33,7 +33,7 @@ wait "${child_pid}" APIV2_TEST_EXPECT_TIMEOUT=2 t POST "containers/${CTR}/wait?condition=next-exit" 999 like "$(<$WORKDIR/curl.headers.out)" ".*HTTP.* 200 OK.*" \ "Received headers from /wait" -if [[ -e $WORKDIR/curl.result.out ]]; then +if [[ -s $WORKDIR/curl.result.out ]]; then _show_ok 0 "UNEXPECTED: curl on /wait returned results" fi diff --git a/test/apiv2/27-containersEvents.at b/test/apiv2/27-containersEvents.at index 082cfabf45..e57ee46777 100644 --- a/test/apiv2/27-containersEvents.at +++ b/test/apiv2/27-containersEvents.at @@ -29,7 +29,7 @@ t GET "events?stream=false&since=$START" 200 \ 'select(.status | contains("die")).Actor.Attributes.exitCode=1' t GET "events?stream=false&since=$START&type=remove" 200 \ - 'select(.status| contains("remove")).Action=remove' \ + 'select(.status | contains("remove")).Action=remove' \ 'select(.status | contains("remove")).Actor.Attributes.containerExitCode=1' # vim: filetype=sh diff --git a/test/apiv2/28-containersAnnotations.at b/test/apiv2/28-containersAnnotations.at new file mode 100644 index 0000000000..89466f555d --- /dev/null +++ b/test/apiv2/28-containersAnnotations.at @@ -0,0 +1,8 @@ +# -*- sh -*- + +podman pull $IMAGE &>/dev/null +t POST containers/create Image=$IMAGE HostConfig='{"annotations":{"foo":"bar","zoo":"boo"}}' 201 .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") +t GET containers/$cid/json 200 \ + .HostConfig.Annotations.foo=bar \ + .HostConfig.Annotations.zoo=boo \ diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at index 976c2a4045..e5701e4cc8 100644 --- a/test/apiv2/35-networks.at +++ b/test/apiv2/35-networks.at @@ -192,6 +192,22 @@ t DELETE libpod/networks/macvlan1 200 \ .[0].Name~macvlan1 \ .[0].Err=null + +# create network with isolate option and make sure it is not shown in docker compat endpoint 
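The 25-containersMore.at hunk above asserts that the current libpod inspect returns typed fields (StopSignal as a string, Entrypoint as an array) while a /v4.0.0 request still gets the old compatible forms. A hypothetical manual comparison for the same container name used there (socket path assumed, API service already running):

```
# Illustrative only: compare v5 libpod inspect output with the v4-compat form.
sock="$XDG_RUNTIME_DIR/podman/podman.sock"
for v in v5.0.0 v4.0.0; do
    curl -s --unix-socket "$sock" "http://d/$v/libpod/containers/foo/json" |
        jq --arg v "$v" '{api: $v, StopSignal: .Config.StopSignal, Entrypoint: .Config.Entrypoint}'
done
```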
+podman network create --opt isolate=true isolate-test +# Note the order of both list calls is important to test for https://github.com/containers/podman/issues/22330 +# First call the compat endpoint, then the libpod one. Previously this would have removed +# the internal option for the libpod endpoint as well. +t GET networks?filters='{"name":["isolate-test"]}' 200 \ + .[0].Name=isolate-test \ + .[0].Options="{}" + +t GET libpod/networks/json?filters='{"name":["isolate-test"]}' 200 \ + .[0].name=isolate-test \ + .[0].options.isolate="true" + +t DELETE libpod/networks/isolate-test 200 + # # test networks with containers # diff --git a/test/apiv2/python/requirements.txt b/test/apiv2/python/requirements.txt index a3596dd639..cc5079519e 100644 --- a/test/apiv2/python/requirements.txt +++ b/test/apiv2/python/requirements.txt @@ -1,5 +1,8 @@ -requests-mock~=1.11.0 -requests~=2.31.0 -setuptools~=69.1.0 -python-dateutil~=2.8.1 +requests-mock~=1.12.1 +requests~=2.32.3 +setuptools~=70.1.0 +python-dateutil~=2.9.0 PyYAML~=6.0.0 +openapi-schema-validator~=0.6.2 +pytest==8.1.2 +docker~=6.1.0 diff --git a/test/apiv2/test-apiv2 b/test/apiv2/test-apiv2 index c4d04d6d4a..14227ea8fe 100755 --- a/test/apiv2/test-apiv2 +++ b/test/apiv2/test-apiv2 @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Usage: test-apiv2 [PORT] +# Usage: test-apiv2 testglob # # DEVELOPER NOTE: you almost certainly don't need to play in here. See README. # @@ -238,17 +238,17 @@ function jsonify() { function t() { local method=$1; shift local path=$1; shift - local -a curl_args + local -a curl_args form_args local content_type="application/json" local testname="$method $path" # POST and PUT requests may be followed by one or more key=value pairs. # Slurp the command line until we see a 3-digit status code. - if [[ $method = "POST" || $method == "PUT" || $method = "DELETE" ]]; then + if [[ $method = "POST" || $method == "PUT" || $method == "DELETE" ]]; then local -a post_args - if [[ $method = "POST" ]]; then + if [[ $method == "POST" ]]; then function _add_curl_args() { curl_args+=(--data-binary @$1); } else function _add_curl_args() { curl_args+=(--upload-file $1); } @@ -260,6 +260,10 @@ function t() { # --disable makes curl not lookup the curlrc file, it shouldn't affect the tests in any way. 
-) curl_args+=(--disable); shift;; + --form=*) form_args+=(--form); + form_args+=("${arg#--form=}"); + content_type="multipart/form-data"; + shift;; *=*) post_args+=("$arg"); shift;; *.json) _add_curl_args $arg; @@ -276,9 +280,12 @@ function t() { *) die "Internal error: invalid POST arg '$arg'" ;; esac done - if [[ -z "$curl_args" ]]; then + if [[ -z "${curl_args[*]}" && -z "${form_args[*]}" ]]; then curl_args=(-d $(jsonify ${post_args[*]})) testname="$testname [${curl_args[*]}]" + elif [[ -z "${curl_args[*]}" ]]; then + curl_args=(--form request.json=$(jsonify ${post_args[*]}) "${form_args[@]}") + testname="$testname [${curl_args[*]} ${form_args[*]}]" fi fi @@ -298,7 +305,7 @@ function t() { url=http://$HOST:$PORT case "$path" in /*) url="$url$path" ;; - libpod/*) url="$url/v4.0.0/$path" ;; + libpod/*) url="$url/v5.0.0/$path" ;; *) url="$url/v1.41/$path" ;; esac fi @@ -315,18 +322,37 @@ function t() { local expected_code=$1; shift + if [[ "$expected_code" == "101" ]]; then + curl_args+=("-H" "Connection: upgrade" "-H" "Upgrade: tcp") + fi + # Log every action we do echo "-------------------------------------------------------------" >>$LOG echo "\$ $testname" >>$LOG rm -f $WORKDIR/curl.* - # -s = silent, but --write-out 'format' gives us important response data + # The --write-out 'format' gives us important response data. # The hairy "{ ...;rc=$?; } || :" lets us capture curl's exit code and # give a helpful diagnostic if it fails. - { response=$(curl -s -X $method "${curl_args[@]}" \ + : > $WORKDIR/curl.result.out + : > $WORKDIR/curl.result.err + { response=$(curl -X $method "${curl_args[@]}" \ -H "Content-type: $content_type" \ --dump-header $WORKDIR/curl.headers.out \ + -v --stderr $WORKDIR/curl.result.err \ --write-out '%{http_code}^%{content_type}^%{time_total}' \ -o $WORKDIR/curl.result.out "$url"); rc=$?; } || : + if [ -n "$PODMAN_TESTS_DUMP_TRACES" ]; then + # Dump the results we got back, exactly as we got them back. 
+ echo "\$ begin stdout" >>$LOG + test -s $WORKDIR/curl.result.out && od -t x1c $WORKDIR/curl.result.out 2>&1 >>$LOG + echo "\$ end stdout" >>$LOG + echo "\$ begin stderr" >>$LOG + test -s $WORKDIR/curl.result.err && cat $WORKDIR/curl.result.err >>$LOG + echo "\$ end stderr" >>$LOG + echo "\$ begin response code^content_type^time_total" >>$LOG + od -t x1c <<< "$response" >>$LOG + echo "\$ end response" >>$LOG + fi # Special case: this means we *expect and want* a timeout if [[ -n "$APIV2_TEST_EXPECT_TIMEOUT" ]]; then diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas index 53e5a8caea..832a7e94e7 100755 --- a/test/buildah-bud/apply-podman-deltas +++ b/test/buildah-bud/apply-podman-deltas @@ -134,9 +134,7 @@ errmsg "non-directory/Dockerfile: not a directory" \ "bud with a path to a Dockerfile (-f) containing a non-directory entry" errmsg "no such file or directory" \ - "Error: context must be a directory:" \ - "bud with dir for file but no Dockerfile in dir" \ - "bud with bad dir Dockerfile" + "Error: context must be a directory:" errmsg "no such file or directory" \ "Error: no context directory and no Containerfile specified" \ @@ -144,11 +142,11 @@ errmsg "no such file or directory" \ errmsg "is not a file" \ "Error: no Containerfile or Dockerfile specified or found in context directory" \ - "bud with specified context should fail if assumed Dockerfile is a directory" + "bud with specified context should fail if Dockerfile in context directory is actually a file" errmsg "no such file or directory" \ "context must be a directory" \ - "bud with specified context should fail if context contains not-existing Dockerfile" + "bud with specified context should fail if context directory does not exist" # 2022-04-26 after buildah PR 3926 (where Ed added error-message checks" errmsg "no FROM statement found" \ @@ -159,8 +157,8 @@ errmsg "no contents in .*" \ "Error: context must be a directory: .*" \ "bud with specified context should fail if context contains empty Dockerfile" -errmsg "credential file is not accessible: stat /tmp/nonexistent: no such file or directory" \ - "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory" \ +errmsg "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory" \ + "Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory" \ "bud with Containerfile should fail with nonexistent authfile" errmsg "cannot find Containerfile or Dockerfile" \ @@ -256,6 +254,9 @@ skip_if_remote "Explicit request in buildah PR 4190 to skip this on remote" \ skip_if_rootless "Flakes when run rootless, too. See Buildah PR 4190" \ "build: test race in updating image name while performing parallel commits" +skip_if_remote "--events-backend does not work with podman-remote" \ + "build test default ulimits" + ############################################################################### # BEGIN tests which are skipped due to actual podman or podman-remote bugs. 
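Two test-apiv2 details above fit together: curl.result.out and curl.result.err are now pre-created with `: >`, so 26-containersWait.at switches its check from `-e` (file exists) to `-s` (file exists and is non-empty), and setting PODMAN_TESTS_DUMP_TRACES dumps the raw curl output into the log. A minimal sketch of both points; the invocation on the last line is only an assumed example of the `test-apiv2 testglob` usage shown above:

```
# -e is true for any existing file, even an empty one; -s requires content.
: > /tmp/result.out                      # pre-created and empty
[[ -e /tmp/result.out ]] && echo "-e: file exists"
[[ -s /tmp/result.out ]] || echo "-s: empty, so no unexpected body"

# Run one apiv2 test file with verbose curl traces written to the log.
PODMAN_TESTS_DUMP_TRACES=1 ./test/apiv2/test-apiv2 20-containers
```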
@@ -288,6 +289,16 @@ skip_if_rootless_remote "FIXME: not sure if 17788 or some other bug" \ skip "FIXME: 2023-06-13 buildah PR 4746 broke this test" \ "bud with encrypted FROM image" +# 2024-04-16 test needs to be fixed in buildah repo, to use another registry +skip "FIXME: 2024-04-16 nixery is down" \ + "bud-implicit-no-history" + +# 2024-05-28 FIXME FIXME FIXME new VMs barf on all git tests, can't connect +skip "FIXME: 2024-05-28 new VMs from #338" \ + "bud-git-context" \ + "bud-git-context-subdirectory" \ + "bud using gitrepo and branch" + # END temporary workarounds that must be reevaluated periodically ############################################################################### diff --git a/test/buildah-bud/buildah-tests.diff b/test/buildah-bud/buildah-tests.diff index f8029493ad..481e99bfd8 100644 --- a/test/buildah-bud/buildah-tests.diff +++ b/test/buildah-bud/buildah-tests.diff @@ -1,4 +1,4 @@ -From 716a55e6ce36c30df4dc9cde3d1b43b8c7d19c9e Mon Sep 17 00:00:00 2001 +From 09b115ea501320bde7cf979f280f42dc803aa70b Mon Sep 17 00:00:00 2001 From: Ed Santiago Date: Thu, 6 Oct 2022 17:32:59 -0600 Subject: [PATCH] tweaks for running buildah tests under podman @@ -9,12 +9,12 @@ Signed-off-by: Ed Santiago 1 file changed, 115 insertions(+), 4 deletions(-) diff --git a/tests/helpers.bash b/tests/helpers.bash -index 349145f29..3a0348f0b 100644 +index f4245c8bc..8df18c0cc 100644 --- a/tests/helpers.bash +++ b/tests/helpers.bash -@@ -70,6 +70,38 @@ EOF - ROOTDIR_OPTS="--root ${TEST_SCRATCH_DIR}/root --runroot ${TEST_SCRATCH_DIR}/runroot --storage-driver ${STORAGE_DRIVER}" +@@ -71,6 +71,38 @@ EOF BUILDAH_REGISTRY_OPTS="--registries-conf ${TEST_SOURCES}/registries.conf --registries-conf-dir ${TEST_SCRATCH_DIR}/registries.d --short-name-alias-conf ${TEST_SCRATCH_DIR}/cache/shortnames.conf" + COPY_REGISTRY_OPTS="--registries-conf ${TEST_SOURCES}/registries.conf --registries-conf-dir ${TEST_SCRATCH_DIR}/registries.d --short-name-alias-conf ${TEST_SCRATCH_DIR}/cache/shortnames.conf" PODMAN_REGISTRY_OPTS="--registries-conf ${TEST_SOURCES}/registries.conf" + + PODMAN_SERVER_PID= @@ -51,7 +51,7 @@ index 349145f29..3a0348f0b 100644 } function starthttpd() { -@@ -113,6 +145,32 @@ function teardown_tests() { +@@ -114,6 +146,32 @@ function teardown_tests() { stop_git_daemon stop_registry @@ -84,7 +84,7 @@ index 349145f29..3a0348f0b 100644 # Workaround for #1991 - buildah + overlayfs leaks mount points. # Many tests leave behind /var/tmp/.../root/overlay and sub-mounts; # let's find those and clean them up, otherwise 'rm -rf' fails. -@@ -202,7 +260,12 @@ function copy() { +@@ -203,7 +261,12 @@ function copy() { } function podman() { @@ -98,7 +98,7 @@ index 349145f29..3a0348f0b 100644 } # There are various scenarios where we would like to execute `tests` as rootless user, however certain commands like `buildah mount` -@@ -266,8 +329,36 @@ function run_buildah() { +@@ -267,8 +330,36 @@ function run_buildah() { --retry) retry=3; shift;; # retry network flakes esac @@ -136,7 +136,7 @@ index 349145f29..3a0348f0b 100644 # If session is rootless and `buildah mount` is invoked, perform unshare, # since normal user cannot mount a filesystem unless they're in a user namespace along with its own mount namespace. 
-@@ -281,8 +372,8 @@ function run_buildah() { +@@ -282,8 +373,8 @@ function run_buildah() { retry=$(( retry - 1 )) # stdout is only emitted upon error; this echo is to help a debugger @@ -147,7 +147,7 @@ index 349145f29..3a0348f0b 100644 # without "quotes", multiple lines are glommed together into one if [ -n "$output" ]; then echo "$output" -@@ -621,6 +712,26 @@ function skip_if_no_docker() { +@@ -644,6 +735,26 @@ function skip_if_no_unshare() { fi } @@ -175,5 +175,5 @@ index 349145f29..3a0348f0b 100644 daemondir=${TEST_SCRATCH_DIR}/git-daemon mkdir -p ${daemondir}/repo -- -2.39.2 +2.44.0 diff --git a/test/compose/README.md b/test/compose/README.md index 863decf2c9..12ecbaebfc 100644 --- a/test/compose/README.md +++ b/test/compose/README.md @@ -1,7 +1,8 @@ -Tests for docker-compose -======================== +Tests for docker-compose v2 +=========================== -This directory contains tests for docker-compose under podman. +This directory contains tests for docker-compose v2 under podman. +docker-compose v1 is no longer supported upstream so we no longer test with it. Each subdirectory must contain one docker-compose.yml file along with all necessary infrastructure for it (e.g. Containerfile, any files diff --git a/test/compose/etc_hosts/tests.sh b/test/compose/etc_hosts/tests.sh index 16ed314bf7..80da78c083 100644 --- a/test/compose/etc_hosts/tests.sh +++ b/test/compose/etc_hosts/tests.sh @@ -1,9 +1,6 @@ # -*- bash -*- -ctr_name="etc_hosts_test_1" -if [ "$TEST_FLAVOR" = "compose_v2" ]; then - ctr_name="etc_hosts-test-1" -fi +ctr_name="etc_hosts-test-1" podman exec "$ctr_name" sh -c 'grep "foobar" /etc/hosts' like "$output" "10\.123\.0\." "$testname : no entries are copied from the host" diff --git a/test/compose/ipam_set_ip/tests.sh b/test/compose/ipam_set_ip/tests.sh index 945303e52a..4686fb82ed 100644 --- a/test/compose/ipam_set_ip/tests.sh +++ b/test/compose/ipam_set_ip/tests.sh @@ -1,9 +1,6 @@ # -*- bash -*- -ctr_name="ipam_set_ip_test_1" -if [ "$TEST_FLAVOR" = "compose_v2" ]; then - ctr_name="ipam_set_ip-test-1" -fi +ctr_name="ipam_set_ip-test-1" podman container inspect "$ctr_name" --format '{{ .NetworkSettings.Networks.ipam_set_ip_net1.IPAddress }}' is "$output" "10.123.0.253" "$testname : ip address is set" podman container inspect "$ctr_name" --format '{{ .NetworkSettings.Networks.ipam_set_ip_net1.MacAddress }}' diff --git a/test/compose/test-compose b/test/compose/test-compose index a123a8de2a..3275f987f6 100755 --- a/test/compose/test-compose +++ b/test/compose/test-compose @@ -247,6 +247,15 @@ function podman() { return $rc } +# as rootless we want to test the remote connection so we add --connection +function podman_compose() { + if is_rootless; then + $PODMAN_BIN --connection compose-sock compose "$@" + else + podman compose "$@" + fi +} + ################### # random_string # Returns a pseudorandom human-readable string ################### @@ -271,12 +280,26 @@ done # When rootless use a socket path accessible by the rootless user if is_rootless; then + # lets test two cases here, for rootless we try to connect to the connection as this should be respected DOCKER_SOCK="$WORKDIR/docker.sock" + # use PODMAN_CONNECTIONS_CONF so we do not overwrite user settings + PODMAN_CONNECTIONS_CONF="$WORKDIR/connections.json" + export PODMAN_CONNECTIONS_CONF + $PODMAN_BIN system connection add --default notexists "unix:///I/do/not/exist" + $PODMAN_BIN system connection add compose-sock "unix://$DOCKER_SOCK" + +else + # export DOCKER_HOST docker-compose will use it + 
DOCKER_HOST="unix://$DOCKER_SOCK" + export DOCKER_HOST fi -# export DOCKER_HOST docker-compose will use it -DOCKER_HOST="unix://$DOCKER_SOCK" -export DOCKER_HOST +# hide annoying podman compose warnings, some tests want to check compose stderr and this breaks it. +CONTAINERS_CONF_OVERRIDE="$WORKDIR/containers.conf" +echo '[engine] +compose_warning_logs=false' > "$CONTAINERS_CONF_OVERRIDE" +export CONTAINERS_CONF_OVERRIDE + # Identify the tests to run. If called with args, use those as globs. tests_to_run=() @@ -336,12 +359,12 @@ for t in "${tests_to_run[@]}"; do trap - ERR fi - podman compose up -d &> $logfile + podman_compose up -d &> $logfile docker_compose_rc=$? if [[ $docker_compose_rc -ne 0 ]]; then _show_ok 0 "$testname - up" "[ok]" "status=$docker_compose_rc" sed -e 's/^/# /' <$logfile - podman compose down >>$logfile 2>&1 # No status check here + podman_compose down >>$logfile 2>&1 # No status check here exit 1 fi _show_ok 1 "$testname - up" @@ -361,7 +384,7 @@ for t in "${tests_to_run[@]}"; do fi # Done. Clean up. - podman compose down &>> $logfile + podman_compose down &>> $logfile rc=$? if [[ $rc -eq 0 ]]; then _show_ok 1 "$testname - down" diff --git a/test/compose/two_networks/tests.sh b/test/compose/two_networks/tests.sh index af0d1fbe37..e22b76c3cb 100644 --- a/test/compose/two_networks/tests.sh +++ b/test/compose/two_networks/tests.sh @@ -1,9 +1,6 @@ # -*- bash -*- -ctr_name="two_networks_con1_1" -if [ "$TEST_FLAVOR" = "compose_v2" ]; then - ctr_name="two_networks-con1-1" -fi +ctr_name="two_networks-con1-1" podman container inspect "$ctr_name" --format '{{len .NetworkSettings.Networks}}' is "$output" "2" "$testname : Container is connected to both networks" podman container inspect "$ctr_name" --format '{{.NetworkSettings.Networks}}' diff --git a/test/compose/uptwice/tests.sh b/test/compose/uptwice/tests.sh index 013b5a29a8..5001eb7a03 100644 --- a/test/compose/uptwice/tests.sh +++ b/test/compose/uptwice/tests.sh @@ -5,13 +5,10 @@ NL=$'\n' cp docker-compose.yml docker-compose.yml.bak sed -i -e 's/10001/10002/' docker-compose.yml -output=$(docker-compose up -d 2>&1) +output=$(podman_compose up -d 2>&1) # Horrible output check here but we really want to make sure that there are # no unexpected warning/errors and the normal messages are send on stderr as # well so we cannot check for an empty stderr. -expected="Recreating uptwice_app_1 ... ${CR}${NL}Recreating uptwice_app_1 ... done$CR" -if [ "$TEST_FLAVOR" = "compose_v2" ]; then - expected="Container uptwice-app-1 Recreate${NL}Container uptwice-app-1 Recreated${NL}Container uptwice-app-1 Starting${NL}Container uptwice-app-1 Started" -fi +expected="Container uptwice-app-1 Recreate${NL}Container uptwice-app-1 Recreated${NL}Container uptwice-app-1 Starting${NL}Container uptwice-app-1 Started" is "$output" "$expected" "no error output in compose up (#15580)" diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go index f81614fdd5..1f22559023 100644 --- a/test/e2e/attach_test.go +++ b/test/e2e/attach_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman attach", func() { @@ -15,7 +14,7 @@ var _ = Describe("Podman attach", func() { It("podman attach to bogus container", func() { session := podmanTest.Podman([]string{"attach", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman attach to non-running container", func() { @@ -25,7 +24,7 @@ var _ = Describe("Podman attach", func() { results := podmanTest.Podman([]string{"attach", "test1"}) results.WaitWithDefaultTimeout() - Expect(results).Should(Exit(125)) + Expect(results).Should(ExitWithError(125, "you can only attach to running containers")) }) It("podman container attach to non-running container", func() { @@ -36,7 +35,7 @@ var _ = Describe("Podman attach", func() { results := podmanTest.Podman([]string{"container", "attach", "test1"}) results.WaitWithDefaultTimeout() - Expect(results).Should(Exit(125)) + Expect(results).Should(ExitWithError(125, "you can only attach to running containers")) }) It("podman attach to multiple containers", func() { @@ -50,7 +49,7 @@ var _ = Describe("Podman attach", func() { results := podmanTest.Podman([]string{"attach", "test1", "test2"}) results.WaitWithDefaultTimeout() - Expect(results).Should(Exit(125)) + Expect(results).Should(ExitWithError(125, " attach` accepts at most one argument")) }) It("podman attach to a running container", func() { diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go index 8042dfcb0a..d2d50d49c6 100644 --- a/test/e2e/build_test.go +++ b/test/e2e/build_test.go @@ -94,6 +94,25 @@ var _ = Describe("Podman build", func() { Expect(session).Should(ExitCleanly()) }) + It("podman build with not found Containerfile or Dockerfile", func() { + targetPath := filepath.Join(podmanTest.TempDir, "notfound") + err = os.Mkdir(targetPath, 0755) + Expect(err).ToNot(HaveOccurred()) + defer func() { + Expect(os.RemoveAll(targetPath)).To(Succeed()) + }() + + session := podmanTest.Podman([]string{"build", targetPath}) + session.WaitWithDefaultTimeout() + expectStderr := fmt.Sprintf("no Containerfile or Dockerfile specified or found in context directory, %s", targetPath) + Expect(session).Should(ExitWithError(125, expectStderr)) + + session = podmanTest.Podman([]string{"build", "-f", "foo", targetPath}) + session.WaitWithDefaultTimeout() + expectStderr = fmt.Sprintf("the specified Containerfile or Dockerfile does not exist, %s", "foo") + Expect(session).Should(ExitWithError(125, expectStderr)) + }) + It("podman build with logfile", func() { logfile := filepath.Join(podmanTest.TempDir, "logfile") session := podmanTest.Podman([]string{"build", "--pull=never", "--tag", "test", "--logfile", logfile, "build/basicalpine"}) @@ -121,7 +140,7 @@ var _ = Describe("Podman build", func() { It("podman build context directory a file", func() { session := podmanTest.Podman([]string{"build", "--pull=never", "build/context_dir_a_file"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "context must be a directory:")) }) // Check that builds with different values for the squash options @@ -190,12 +209,11 @@ var _ = Describe("Podman build", func() { // Test if entire build is used from cache Expect(session.OutputToString()).To(ContainSubstring("Using cache")) - session = podmanTest.Podman([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-d"}) + session = 
podmanTest.Podman([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) // Check for one layers Expect(strings.Fields(session.OutputToString())).To(HaveLen(1)) - }) It("podman build Containerfile locations", func() { @@ -314,8 +332,7 @@ RUN printenv http_proxy`, CITEST_IMAGE) // this tries to use the cache so we explicitly disable it session = podmanTest.Podman([]string{"build", "--no-cache", "--pull-never", "--http-proxy=false", "--file", dockerfilePath, podmanTest.TempDir}) session.Wait(120) - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring(`Error: building at STEP "RUN printenv http_proxy"`)) + Expect(session).Should(ExitWithError(1, `Error: building at STEP "RUN printenv http_proxy"`)) }) It("podman build relay exit code to process", func() { @@ -332,7 +349,7 @@ RUN exit 5`, CITEST_IMAGE) Expect(err).ToNot(HaveOccurred()) session := podmanTest.Podman([]string{"build", "-t", "error-test", "--file", dockerfilePath, podmanTest.TempDir}) session.Wait(120) - Expect(session).Should(Exit(5)) + Expect(session).Should(ExitWithError(5, `building at STEP "RUN exit 5": while running runtime: exit status 5`)) }) It("podman build and check identity", func() { @@ -398,8 +415,7 @@ COPY /emptydir/* /dir`, CITEST_IMAGE) // NOTE: Docker and buildah both should error when `COPY /* /dir` is done on emptydir // as context. However buildkit simply ignores this so when buildah also starts ignoring // for such case edit this test to return 0 and check that no `/dir` should be in the result. - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("can't make relative to")) + Expect(session).Should(ExitWithError(125, "can't make relative to")) }) It("podman remote test container/docker file is not inside context dir", func() { @@ -441,7 +457,7 @@ RUN find /test`, CITEST_IMAGE) It("podman remote build must not allow symlink for ignore files", func() { // Create a random file where symlink must be resolved // but build should not be able to access it. - privateFile := filepath.Join("/tmp", "private_file") + privateFile := filepath.Join(podmanTest.TempDir, "private_file") f, err := os.Create(privateFile) Expect(err).ToNot(HaveOccurred()) // Mark hello to be ignored in outerfile, but it should not be ignored. @@ -449,16 +465,14 @@ RUN find /test`, CITEST_IMAGE) Expect(err).ToNot(HaveOccurred()) defer f.Close() - // Create .dockerignore which is a symlink to /tmp/private_file. + // Create .dockerignore which is a symlink to /tmp/.../private_file outside of the context dir. currentDir, err := os.Getwd() Expect(err).ToNot(HaveOccurred()) ignoreFile := filepath.Join(currentDir, "build/containerignore-symlink/.dockerignore") err = os.Symlink(privateFile, ignoreFile) Expect(err).ToNot(HaveOccurred()) // Remove created .dockerignore for this test when test ends. 
- defer func() { - os.Remove(ignoreFile) - }() + defer os.Remove(ignoreFile) if IsRemote() { podmanTest.StopRemoteService() @@ -759,7 +773,7 @@ RUN grep CapEff /proc/self/status` }) session.WaitWithDefaultTimeout() // Then - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `unrecognized isolation type "bogus"`)) }) It("podman build --timestamp flag", func() { @@ -843,7 +857,7 @@ RUN ls /dev/fuse`, CITEST_IMAGE) Expect(err).ToNot(HaveOccurred()) session := podmanTest.Podman([]string{"build", "--pull-never", "-t", "test", "--file", containerfilePath, podmanTest.TempDir}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, `building at STEP "RUN ls /dev/fuse": while running runtime: exit status 1`)) session = podmanTest.Podman([]string{"build", "--pull-never", "--device", "/dev/fuse", "-t", "test", "--file", containerfilePath, podmanTest.TempDir}) session.WaitWithDefaultTimeout() @@ -859,7 +873,7 @@ RUN ls /dev/test1`, CITEST_IMAGE) Expect(err).ToNot(HaveOccurred()) session := podmanTest.Podman([]string{"build", "--pull-never", "-t", "test", "--file", containerfilePath, podmanTest.TempDir}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, `building at STEP "RUN ls /dev/test1": while running runtime: exit status 1`)) session = podmanTest.Podman([]string{"build", "--pull-never", "--device", "/dev/zero:/dev/test1", "-t", "test", "--file", containerfilePath, podmanTest.TempDir}) session.WaitWithDefaultTimeout() @@ -898,6 +912,6 @@ RUN ls /dev/test1`, CITEST_IMAGE) session = podmanTest.Podman([]string{"build", "--pull-never", "--file", "build/cache/Dockerfilecacheread", "build/cache/"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, `building at STEP "RUN --mount=type=cache,target=/test,z cat /test/world": while running runtime: exit status 1`)) }) }) diff --git a/test/e2e/checkpoint_image_test.go b/test/e2e/checkpoint_image_test.go index 8a4a10b0ea..093c873092 100644 --- a/test/e2e/checkpoint_image_test.go +++ b/test/e2e/checkpoint_image_test.go @@ -35,8 +35,7 @@ var _ = Describe("Podman checkpoint", func() { checkpointImage := "foobar-checkpoint" session := podmanTest.Podman([]string{"container", "checkpoint", "--create-image", checkpointImage, "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("no container with name or ID \"foobar\" found")) + Expect(session).To(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman checkpoint --create-image with running container", func() { diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index b2000c5269..58198fe37e 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -64,15 +64,13 @@ var _ = Describe("Podman checkpoint", func() { It("podman checkpoint bogus container", func() { session := podmanTest.Podman([]string{"container", "checkpoint", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("no such container")) + Expect(session).Should(ExitWithError(125, "no such container")) }) It("podman restore bogus container", func() { session := podmanTest.Podman([]string{"container", "restore", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - 
Expect(session.ErrorToString()).To(ContainSubstring("no such container or image")) + Expect(session).Should(ExitWithError(125, "no such container or image")) }) It("podman checkpoint a running container by id", func() { @@ -228,7 +226,7 @@ var _ = Describe("Podman checkpoint", func() { result = podmanTest.Podman([]string{"pause", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, `"exited" is not running, can't pause: container state improper`)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited")) @@ -239,7 +237,7 @@ var _ = Describe("Podman checkpoint", func() { result = podmanTest.Podman([]string{"rm", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(2)) + Expect(result).Should(ExitWithError(2, " as it is running - running or paused containers cannot be removed without force: container state improper")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) result = podmanTest.Podman([]string{"rm", "-t", "1", "-f", cid}) @@ -354,7 +352,9 @@ var _ = Describe("Podman checkpoint", func() { result := podmanTest.Podman([]string{"container", "checkpoint", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + // FIXME: criu emits an error message, but podman never sees it: + // "CRIU checkpointing failed -52. Please check CRIU logfile /...." + Expect(result).Should(ExitWithError(125, "failed: exit status 1")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) @@ -370,7 +370,16 @@ var _ = Describe("Podman checkpoint", func() { result = podmanTest.Podman([]string{"container", "restore", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + // default message when using crun + expectStderr := "crun: CRIU restoring failed -52. Please check CRIU logfile" + if podmanTest.OCIRuntime == "runc" { + expectStderr = "runc: criu failed: type NOTIFY errno 0" + } + if !IsRemote() { + // This part is only seen with podman local, never remote + expectStderr = "OCI runtime error: " + expectStderr + } + Expect(result).Should(ExitWithError(125, expectStderr)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited")) @@ -618,8 +627,7 @@ var _ = Describe("Podman checkpoint", func() { result = podmanTest.Podman([]string{"container", "checkpoint", cid, "-e", fileName, "-c", "non-existing"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("not supported")) + Expect(result).Should(ExitWithError(125, `selected compression algorithm ("non-existing") not supported. 
Please select one from`)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) Expect(podmanTest.NumberOfContainers()).To(Equal(1)) @@ -731,8 +739,7 @@ var _ = Describe("Podman checkpoint", func() { // Verify the changes to the container's root file-system result = podmanTest.Podman([]string{"exec", cid, "cat", "/test.output"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(1)) - Expect(result.ErrorToString()).To(ContainSubstring("cat: can't open '/test.output': No such file or directory")) + Expect(result).Should(ExitWithError(1, "cat: can't open '/test.output': No such file or directory")) // Remove exported checkpoint os.Remove(fileName) @@ -773,8 +780,7 @@ var _ = Describe("Podman checkpoint", func() { // Verify the changes to the container's root file-system result = podmanTest.Podman([]string{"exec", cid, "cat", "/test.output"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(1)) - Expect(result.ErrorToString()).To(ContainSubstring("cat: can't open '/test.output': No such file or directory")) + Expect(result).Should(ExitWithError(1, "cat: can't open '/test.output': No such file or directory")) // Remove exported checkpoint os.Remove(fileName) @@ -833,8 +839,7 @@ var _ = Describe("Podman checkpoint", func() { // Checkpoint the container - this should fail as it was started with --rm result := podmanTest.Podman([]string{"container", "checkpoint", cid}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring("cannot checkpoint containers that have been started with '--rm'")) + Expect(result).To(ExitWithError(125, "cannot checkpoint containers that have been started with '--rm'")) // Checkpointing with --export should still work fileName := filepath.Join(podmanTest.TempDir, "/checkpoint-"+cid+".tar.gz") @@ -922,10 +927,7 @@ var _ = Describe("Podman checkpoint", func() { // Restore container should fail because named volume still exists result = podmanTest.Podman([]string{"container", "restore", "-i", checkpointFileName}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring( - "volume with name my-test-vol already exists. Use --ignore-volumes to not restore content of volumes", - )) + Expect(result).To(ExitWithError(125, "volume with name my-test-vol already exists. 
Use --ignore-volumes to not restore content of volumes")) // Remove named volume session = podmanTest.Podman([]string{"volume", "rm", "my-test-vol"}) @@ -1108,28 +1110,28 @@ var _ = Describe("Podman checkpoint", func() { "net,uts", "uts,pid", } - for _, share := range namespaceCombination { + for index, share := range namespaceCombination { testName := fmt.Sprintf( "podman checkpoint and restore container out of and into pod (%s)", share, ) share := share // copy into local scope, for use inside function + index := index It(testName, func() { + podName := "test_pod" + if err := criu.CheckForCriu(criu.PodCriuVersion); err != nil { Skip(fmt.Sprintf("check CRIU pod version error: %v", err)) } + if !crutils.CRRuntimeSupportsPodCheckpointRestore(podmanTest.OCIRuntime) { Skip("runtime does not support pod restore: " + podmanTest.OCIRuntime) } + // Create a pod - session := podmanTest.Podman([]string{ - "pod", - "create", - "--share", - share, - }) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--share", share}) session.WaitWithDefaultTimeout() Expect(session).To(ExitCleanly()) podID := session.OutputToString() @@ -1185,12 +1187,7 @@ var _ = Describe("Podman checkpoint", func() { wrongShare := share[:strings.LastIndex(share, ",")] - session = podmanTest.Podman([]string{ - "pod", - "create", - "--share", - wrongShare, - }) + session = podmanTest.Podman([]string{"pod", "create", "--name", podName, "--share", wrongShare}) session.WaitWithDefaultTimeout() Expect(session).To(ExitCleanly()) podID = session.OutputToString() @@ -1205,8 +1202,7 @@ var _ = Describe("Podman checkpoint", func() { fileName, }) result.WaitWithDefaultTimeout() - Expect(result).To(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("does not share the")) + Expect(result).To(ExitWithError(125, "does not share the ")) // Remove the pod and create a new pod result = podmanTest.Podman([]string{ @@ -1217,25 +1213,18 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).To(ExitCleanly()) - session = podmanTest.Podman([]string{ - "pod", - "create", - "--share", - share, - }) + session = podmanTest.Podman([]string{"pod", "create", "--name", podName, "--share", share}) session.WaitWithDefaultTimeout() Expect(session).To(ExitCleanly()) podID = session.OutputToString() - // Restore container with different port mapping - result = podmanTest.Podman([]string{ - "container", - "restore", - "--pod", - podID, - "-i", - fileName, - }) + // Restore container into Pod. + // Verify that restore works with both Pod name and ID. 
+ podArg := podName + if index%2 == 1 { + podArg = podID + } + result = podmanTest.Podman([]string{"container", "restore", "--pod", podArg, "-i", fileName}) result.WaitWithDefaultTimeout() Expect(result).To(ExitCleanly()) @@ -1449,16 +1438,16 @@ var _ = Describe("Podman checkpoint", func() { }) It("podman checkpoint and restore container with --file-locks", func() { - localRunString := getRunString([]string{"--name", "test_name", ALPINE, "flock", "test.lock", "sleep", "100"}) + localRunString := getRunString([]string{"--name", "test_name", ALPINE, "flock", "test.lock", "sh", "-c", "echo READY;sleep 100"}) session := podmanTest.Podman(localRunString) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + Expect(WaitContainerReady(podmanTest, "test_name", "READY", 5, 1)).To(BeTrue(), "Timed out waiting for READY") // Checkpoint is expected to fail without --file-locks result := podmanTest.Podman([]string{"container", "checkpoint", "test_name"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("failed: exit status 1")) + Expect(result).Should(ExitWithError(125, "failed: exit status 1")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) // Checkpoint is expected to succeed with --file-locks @@ -1702,10 +1691,7 @@ var _ = Describe("Podman checkpoint", func() { }) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To( - ContainSubstring("and cannot be restored with runtime"), - ) + Expect(result).Should(ExitWithError(125, "and cannot be restored with runtime")) result = podmanTest.Podman([]string{ "--runtime", diff --git a/test/e2e/cleanup_test.go b/test/e2e/cleanup_test.go index d43e6405ab..10bdd9d92c 100644 --- a/test/e2e/cleanup_test.go +++ b/test/e2e/cleanup_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman container cleanup", func() { @@ -16,8 +15,7 @@ var _ = Describe("Podman container cleanup", func() { It("podman cleanup bogus container", func() { session := podmanTest.Podman([]string{"container", "cleanup", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("no such container")) + Expect(session).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman cleanup container by id", func() { @@ -88,8 +86,7 @@ var _ = Describe("Podman container cleanup", func() { Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"container", "cleanup", "running"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("container state improper")) + Expect(session).Should(ExitWithError(125, "is running or paused, refusing to clean up: container state improper")) }) It("podman cleanup paused container", func() { @@ -102,8 +99,7 @@ var _ = Describe("Podman container cleanup", func() { Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"container", "cleanup", "paused"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("container state improper")) + Expect(session).Should(ExitWithError(125, "is running or paused, refusing to clean up: container state improper")) // unpause so that the cleanup can stop the container, // otherwise it fails with container state improper diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go index cf1081fb89..4ab55303b7 100644 --- a/test/e2e/commit_test.go +++ b/test/e2e/commit_test.go @@ -20,8 +20,7 @@ var _ = Describe("Podman commit", func() { session := podmanTest.Podman([]string{"commit", "test1", "--change", "BOGUS=foo", "foobar.com/test1-image:latest"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(HaveSuffix(`applying changes: processing change "BOGUS foo": did not understand change instruction "BOGUS foo"`)) + Expect(session).Should(ExitWithError(125, `applying changes: processing change "BOGUS foo": did not understand change instruction "BOGUS foo"`)) session = podmanTest.Podman([]string{"commit", "test1", "foobar.com/test1-image:latest"}) session.WaitWithDefaultTimeout() @@ -47,8 +46,7 @@ var _ = Describe("Podman commit", func() { // commit second time with --quiet, should not write to stderr session = podmanTest.Podman([]string{"commit", "--quiet", "bogus", "foobar.com/test1-image:latest"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(Equal("Error: no container with name or ID \"bogus\" found: no such container")) + Expect(session).Should(ExitWithError(125, `no container with name or ID "bogus" found: no such container`)) }) It("podman commit single letter container", func() { @@ -346,7 +344,7 @@ var _ = Describe("Podman commit", func() { session = podmanTest.Podman([]string{"run", "foobar.com/test1-image:latest", "cat", "/run/secrets/mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(1, "can't open '/run/secrets/mysecret': No such file or directory")) }) diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index 586e884d7b..f617c3a586 100644 --- a/test/e2e/common_test.go +++ 
b/test/e2e/common_test.go @@ -12,6 +12,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -32,7 +33,6 @@ import ( . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -62,6 +62,7 @@ type PodmanTestIntegration struct { TmpDir string } +var GlobalTmpDir string // Single top-level tmpdir for all tests var LockTmpDir string // PodmanSessionIntegration struct for command line session @@ -101,14 +102,14 @@ func TestLibpod(t *testing.T) { } var ( - tempdir string + tempdir string // Working dir for _one_ subtest err error podmanTest *PodmanTestIntegration safeIPOctets [2]uint8 timingsFile *os.File _ = BeforeEach(func() { - tempdir, err = CreateTempDirInTempDir() + tempdir, err = os.MkdirTemp(GlobalTmpDir, "subtest-") Expect(err).ToNot(HaveOccurred()) podmanTest = PodmanTestCreate(tempdir) podmanTest.Setup() @@ -130,29 +131,28 @@ var ( ) const ( - // lockdir - do not use directly use LockTmpDir + // lockdir - do not use directly; use LockTmpDir lockdir = "libpodlock" // imageCacheDir - do not use directly use ImageCacheDir imageCacheDir = "imagecachedir" ) var _ = SynchronizedBeforeSuite(func() []byte { - globalTmpDir := GinkgoT().TempDir() + globalTmpDir, err := os.MkdirTemp("", "podman-e2e-") + Expect(err).ToNot(HaveOccurred()) // make cache dir ImageCacheDir = filepath.Join(globalTmpDir, imageCacheDir) - if err := os.MkdirAll(ImageCacheDir, 0700); err != nil { - GinkgoWriter.Printf("%q\n", err) - os.Exit(1) - } + err = os.MkdirAll(ImageCacheDir, 0700) + Expect(err).ToNot(HaveOccurred()) // Cache images cwd, _ := os.Getwd() INTEGRATION_ROOT = filepath.Join(cwd, "../../") - podman := PodmanTestSetup(GinkgoT().TempDir()) + podman := PodmanTestSetup(filepath.Join(globalTmpDir, "image-init")) // Pull cirros but don't put it into the cache - pullImages := []string{CIRROS_IMAGE, fedoraToolbox, volumeTest} + pullImages := []string{CIRROS_IMAGE, volumeTest} pullImages = append(pullImages, CACHE_IMAGES...) 
for _, image := range pullImages { podman.createArtifact(image) @@ -177,16 +177,16 @@ var _ = SynchronizedBeforeSuite(func() []byte { podman.StopRemoteService() } - // remove temporary podman files, images are now cached in ImageCacheDir + // remove temporary podman files; images are now cached in ImageCacheDir rmAll(podman.PodmanBinary, podman.TempDir) return []byte(globalTmpDir) }, func(data []byte) { cwd, _ := os.Getwd() INTEGRATION_ROOT = filepath.Join(cwd, "../../") - globalTmpDir := string(data) - ImageCacheDir = filepath.Join(globalTmpDir, imageCacheDir) - LockTmpDir = filepath.Join(globalTmpDir, lockdir) + GlobalTmpDir = string(data) + ImageCacheDir = filepath.Join(GlobalTmpDir, imageCacheDir) + LockTmpDir = filepath.Join(GlobalTmpDir, lockdir) timingsFile, err = os.Create(fmt.Sprintf("%s/timings-%d", LockTmpDir, GinkgoParallelProcess())) Expect(err).ToNot(HaveOccurred()) @@ -229,7 +229,7 @@ var _ = SynchronizedAfterSuite(func() { } cwd, _ := os.Getwd() - rmAll(getPodmanBinary(cwd), ImageCacheDir) + rmAll(getPodmanBinary(cwd), GlobalTmpDir) }) func getPodmanBinary(cwd string) string { @@ -242,43 +242,38 @@ func getPodmanBinary(cwd string) string { // PodmanTestCreate creates a PodmanTestIntegration instance for the tests func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration { - var podmanRemoteBinary string - host := GetHostDistributionInfo() cwd, _ := os.Getwd() root := filepath.Join(tempDir, "root") podmanBinary := getPodmanBinary(cwd) - podmanRemoteBinary = filepath.Join(cwd, "../../bin/podman-remote") - if os.Getenv("PODMAN_REMOTE_BINARY") != "" { - podmanRemoteBinary = os.Getenv("PODMAN_REMOTE_BINARY") + podmanRemoteBinary := os.Getenv("PODMAN_REMOTE_BINARY") + if podmanRemoteBinary == "" { + podmanRemoteBinary = filepath.Join(cwd, "../../bin/podman-remote") } - quadletBinary := filepath.Join(cwd, "../../bin/quadlet") - if os.Getenv("QUADLET_BINARY") != "" { - quadletBinary = os.Getenv("QUADLET_BINARY") + quadletBinary := os.Getenv("QUADLET_BINARY") + if quadletBinary == "" { + quadletBinary = filepath.Join(cwd, "../../bin/quadlet") } - conmonBinary := "/usr/libexec/podman/conmon" - altConmonBinary := "/usr/bin/conmon" - if _, err := os.Stat(conmonBinary); os.IsNotExist(err) { - conmonBinary = altConmonBinary - } - if os.Getenv("CONMON_BINARY") != "" { - conmonBinary = os.Getenv("CONMON_BINARY") - } - storageOptions := STORAGE_OPTIONS - if os.Getenv("STORAGE_OPTIONS") != "" { - storageOptions = os.Getenv("STORAGE_OPTIONS") + conmonBinary := os.Getenv("CONMON_BINARY") + if conmonBinary == "" { + conmonBinary = "/usr/libexec/podman/conmon" + if _, err := os.Stat(conmonBinary); errors.Is(err, os.ErrNotExist) { + conmonBinary = "/usr/bin/conmon" + } } - cgroupManager := CGROUP_MANAGER - if isRootless() { - cgroupManager = "cgroupfs" + storageOptions := os.Getenv("STORAGE_OPTIONS") + if storageOptions == "" { + storageOptions = STORAGE_OPTIONS } - if os.Getenv("CGROUP_MANAGER") != "" { - cgroupManager = os.Getenv("CGROUP_MANAGER") + + cgroupManager := os.Getenv("CGROUP_MANAGER") + if cgroupManager == "" { + cgroupManager = CGROUP_MANAGER } ociRuntime := os.Getenv("OCI_RUNTIME") @@ -393,10 +388,8 @@ func (p PodmanTestIntegration) AddImageToRWStore(image string) { func imageTarPath(image string) string { cacheDir := os.Getenv("PODMAN_TEST_IMAGE_CACHE_DIR") if cacheDir == "" { - cacheDir = os.Getenv("TMPDIR") - if cacheDir == "" { - cacheDir = "/tmp" - } + // Avoid /tmp: it may be tmpfs, and these images are large + cacheDir = "/var/tmp" } // e.g., 
registry.com/fubar:latest -> registry.com-fubar-latest.tar @@ -454,6 +447,31 @@ func (p *PodmanTestIntegration) InspectContainer(name string) []define.InspectCo return session.InspectContainerToJSON() } +// Pull a single field from a container using `podman inspect --format {{ field }}`, +// and verify it against the given expected value. +func (p *PodmanTestIntegration) CheckContainerSingleField(name, field, expected string) { + inspect := p.Podman([]string{"inspect", "--format", fmt.Sprintf("{{ %s }}", field), name}) + inspect.WaitWithDefaultTimeout() + ExpectWithOffset(1, inspect).Should(Exit(0)) + ExpectWithOffset(1, inspect.OutputToString()).To(Equal(expected)) +} + +// Check that the contents of a single file in the given container matches the expected value. +func (p *PodmanTestIntegration) CheckFileInContainer(name, filepath, expected string) { + exec := p.Podman([]string{"exec", name, "cat", filepath}) + exec.WaitWithDefaultTimeout() + ExpectWithOffset(1, exec).Should(Exit(0)) + ExpectWithOffset(1, exec.OutputToString()).To(Equal(expected)) +} + +// Check that the contents of a single file in the given container contains the given value. +func (p *PodmanTestIntegration) CheckFileInContainerSubstring(name, filepath, expected string) { + exec := p.Podman([]string{"exec", name, "cat", filepath}) + exec.WaitWithDefaultTimeout() + ExpectWithOffset(1, exec).Should(Exit(0)) + ExpectWithOffset(1, exec.OutputToString()).To(ContainSubstring(expected)) +} + // StopContainer stops a container with no timeout, ensuring a fast test. func (p *PodmanTestIntegration) StopContainer(nameOrID string) { stop := p.Podman([]string{"stop", "-t0", nameOrID}) @@ -675,8 +693,20 @@ func (p *PodmanTestIntegration) Cleanup() { // Make sure to only check exit codes after all cleanup is done. // An error would cause it to stop and return early otherwise. Expect(stop).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", stop.Command.Args, stop.OutputToString(), stop.ErrorToString()) + checkStderrCleanupError(stop, "stop --all -t0 error logged") Expect(podrm).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", podrm.Command.Args, podrm.OutputToString(), podrm.ErrorToString()) + checkStderrCleanupError(podrm, "pod rm -fa -t0 error logged") Expect(rmall).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", rmall.Command.Args, rmall.OutputToString(), rmall.ErrorToString()) + checkStderrCleanupError(rmall, "rm -fa -t0 error logged") +} + +func checkStderrCleanupError(s *PodmanSessionIntegration, cmd string) { + if strings.Contains(podmanTest.OCIRuntime, "runc") { + // we cannot check stderr for runc, way too many errors + return + } + // offset is 1 so the stack trace doesn't link to this helper function here + ExpectWithOffset(1, s.ErrorToString()).To(BeEmpty(), cmd) } // CleanupVolume cleans up the volumes and containers. @@ -686,6 +716,7 @@ func (p *PodmanTestIntegration) CleanupVolume() { session := p.Podman([]string{"volume", "rm", "-fa"}) session.WaitWithDefaultTimeout() Expect(session).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", session.Command.Args, session.OutputToString(), session.ErrorToString()) + checkStderrCleanupError(session, "volume rm -fa error logged") } // CleanupSecret cleans up the secrets and containers.
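Note on the new helpers above: CheckContainerSingleField, CheckFileInContainer, and CheckFileInContainerSubstring are thin wrappers around `podman inspect --format` and `podman exec ... cat`. A minimal usage sketch inside a test body might look like the following; the container name "web", the inspected field, and the file path/contents are illustrative assumptions, not taken from this patch:

    // Hypothetical calls to the helpers defined in common_test.go above.
    // "web" is a made-up container name; field, file, and values are examples only.
    podmanTest.CheckContainerSingleField("web", ".State.Status", "running")
    podmanTest.CheckFileInContainer("web", "/tmp/status", "ready")
    podmanTest.CheckFileInContainerSubstring("web", "/etc/resolv.conf", "nameserver")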
@@ -695,6 +726,7 @@ func (p *PodmanTestIntegration) CleanupSecrets() { session := p.Podman([]string{"secret", "rm", "-a"}) session.Wait(90) Expect(session).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", session.Command.Args, session.OutputToString(), session.ErrorToString()) + checkStderrCleanupError(session, "secret rm -a error logged") } // InspectContainerToJSON takes the session output of an inspect @@ -995,6 +1027,7 @@ func (p *PodmanTestIntegration) RestoreArtifactToCache(image string) error { p.Root = p.ImageCacheDir restore := p.PodmanNoEvents([]string{"load", "-q", "-i", tarball}) restore.WaitWithDefaultTimeout() + Expect(restore).To(ExitCleanly()) } return nil } @@ -1175,7 +1208,7 @@ func (p *PodmanTestIntegration) removeNetwork(name string) { // generatePolicyFile generates a signature verification policy file. // it returns the policy file path. -func generatePolicyFile(tempDir string) string { +func generatePolicyFile(tempDir string, port int) string { keyPath := filepath.Join(tempDir, "key.gpg") policyPath := filepath.Join(tempDir, "policy.json") conf := fmt.Sprintf(` @@ -1187,20 +1220,20 @@ func generatePolicyFile(tempDir string) string { ], "transports": { "docker": { - "localhost:5000": [ + "localhost:%[1]d": [ { "type": "signedBy", "keyType": "GPGKeys", - "keyPath": "%s" + "keyPath": "%[2]s" } ], - "localhost:5000/sigstore-signed": [ + "localhost:%[1]d/sigstore-signed": [ { "type": "sigstoreSigned", "keyPath": "testdata/sigstore-key.pub" } ], - "localhost:5000/sigstore-signed-params": [ + "localhost:%[1]d/sigstore-signed-params": [ { "type": "sigstoreSigned", "keyPath": "testdata/sigstore-key.pub" @@ -1209,7 +1242,7 @@ func generatePolicyFile(tempDir string) string { } } } -`, keyPath) +`, port, keyPath) writeConf([]byte(conf), policyPath) return policyPath } diff --git a/test/e2e/config.go b/test/e2e/config.go index 3ab956dd9a..4867734247 100644 --- a/test/e2e/config.go +++ b/test/e2e/config.go @@ -13,7 +13,6 @@ var ( INFRA_IMAGE = "quay.io/libpod/k8s-pause:3.5" //nolint:revive,stylecheck BB = "quay.io/libpod/busybox:latest" HEALTHCHECK_IMAGE = "quay.io/libpod/alpine_healthcheck:latest" //nolint:revive,stylecheck - fedoraToolbox = "registry.fedoraproject.org/fedora-toolbox:36" volumeTest = "quay.io/libpod/volume-plugin-test-img:20220623" // This image has seccomp profiles that blocks all syscalls. 
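Note on the assertion style used throughout the e2e test diffs in this patch: the former two-step check (exit code via Exit(N), then a separate substring match on ErrorToString()) is collapsed into the single ExitWithError matcher from test/utils. A schematic before/after sketch, using a made-up error message rather than one from this patch:

    // Before: exit status and stderr were asserted separately.
    Expect(session).Should(Exit(125))
    Expect(session.ErrorToString()).To(ContainSubstring("some error message"))

    // After: one matcher asserts both the exit code and that stderr
    // contains the given substring.
    Expect(session).Should(ExitWithError(125, "some error message"))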
diff --git a/test/e2e/config_amd64.go b/test/e2e/config_amd64.go index 1ef7ec4aa3..27ad021b46 100644 --- a/test/e2e/config_amd64.go +++ b/test/e2e/config_amd64.go @@ -1,15 +1,15 @@ package integration var ( - STORAGE_FS = "overlay" //nolint:revive,stylecheck - STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck - ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck - ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck - CACHE_IMAGES = []string{ALPINE, BB, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE, fedoraToolbox} //nolint:revive,stylecheck - NGINX_IMAGE = "quay.io/libpod/alpine_nginx:latest" //nolint:revive,stylecheck - BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck - REGISTRY_IMAGE = "quay.io/libpod/registry:2.8.2" //nolint:revive,stylecheck - CITEST_IMAGE = "quay.io/libpod/testimage:20240123" //nolint:revive,stylecheck - SYSTEMD_IMAGE = "quay.io/libpod/systemd-image:20240124" //nolint:revive,stylecheck - CIRROS_IMAGE = "quay.io/libpod/cirros:latest" //nolint:revive,stylecheck + STORAGE_FS = "overlay" //nolint:revive,stylecheck + STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck + ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck + ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck + CACHE_IMAGES = []string{ALPINE, BB, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE} //nolint:revive,stylecheck + NGINX_IMAGE = "quay.io/libpod/alpine_nginx:latest" //nolint:revive,stylecheck + BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck + REGISTRY_IMAGE = "quay.io/libpod/registry:2.8.2" //nolint:revive,stylecheck + CITEST_IMAGE = "quay.io/libpod/testimage:20240123" //nolint:revive,stylecheck + SYSTEMD_IMAGE = "quay.io/libpod/systemd-image:20240124" //nolint:revive,stylecheck + CIRROS_IMAGE = "quay.io/libpod/cirros:latest" //nolint:revive,stylecheck ) diff --git a/test/e2e/config_arm64.go b/test/e2e/config_arm64.go index 9ace0fc41f..3bb2d3600f 100644 --- a/test/e2e/config_arm64.go +++ b/test/e2e/config_arm64.go @@ -1,15 +1,15 @@ package integration var ( - STORAGE_FS = "overlay" //nolint:revive,stylecheck - STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck - ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck - ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck - CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE, fedoraToolbox} //nolint:revive,stylecheck - NGINX_IMAGE = "quay.io/lsm5/alpine_nginx-aarch64:latest" //nolint:revive,stylecheck - BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck - REGISTRY_IMAGE = "quay.io/libpod/registry:2.8.2" //nolint:revive,stylecheck - CITEST_IMAGE = "quay.io/libpod/testimage:20240123" //nolint:revive,stylecheck - SYSTEMD_IMAGE = "quay.io/libpod/systemd-image:20240124" //nolint:revive,stylecheck - CIRROS_IMAGE = "quay.io/libpod/cirros:latest" //nolint:revive,stylecheck + STORAGE_FS = "overlay" //nolint:revive,stylecheck + STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck + ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck + ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck + CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, 
REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE} //nolint:revive,stylecheck + NGINX_IMAGE = "quay.io/lsm5/alpine_nginx-aarch64:latest" //nolint:revive,stylecheck + BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck + REGISTRY_IMAGE = "quay.io/libpod/registry:2.8.2" //nolint:revive,stylecheck + CITEST_IMAGE = "quay.io/libpod/testimage:20240123" //nolint:revive,stylecheck + SYSTEMD_IMAGE = "quay.io/libpod/systemd-image:20240124" //nolint:revive,stylecheck + CIRROS_IMAGE = "quay.io/libpod/cirros:latest" //nolint:revive,stylecheck ) diff --git a/test/e2e/container_iface_name_test.go b/test/e2e/container_iface_name_test.go index d9f059f9ba..a0030b94e8 100644 --- a/test/e2e/container_iface_name_test.go +++ b/test/e2e/container_iface_name_test.go @@ -9,13 +9,10 @@ import ( . "github.com/onsi/gomega" ) -func isDebianRunc(pTest *PodmanTestIntegration) bool { +// FIXME 2024-05-14: "Debian" here is a proxy for "netavark < 1.10" +func isDebian() bool { info := GetHostDistributionInfo() - if info.Distribution == "debian" && pTest.OCIRuntime == "runc" { - return true - } - - return false + return info.Distribution == "debian" } func createNetworkDevice(name string) { @@ -104,7 +101,7 @@ var _ = Describe("Podman container interface name", func() { } for _, driverType := range []string{"macvlan", "ipvlan"} { - if driverType == "ipvlan" && isDebianRunc(podmanTest) { + if driverType == "ipvlan" && isDebian() { GinkgoWriter.Println("FIXME: Fails with netavark < 1.10. Re-enable once Debian gets an update") continue } @@ -157,7 +154,7 @@ var _ = Describe("Podman container interface name", func() { SkipIfRootless("cannot create network device in rootless mode.") for _, driverType := range []string{"macvlan", "ipvlan"} { - if driverType == "ipvlan" && isDebianRunc(podmanTest) { + if driverType == "ipvlan" && isDebian() { GinkgoWriter.Println("FIXME: Fails with netavark < 1.10. Re-enable once Debian gets an update") continue } @@ -223,7 +220,7 @@ var _ = Describe("Podman container interface name", func() { createContainersConfFileWithDeviceIfaceName(podmanTest) for _, driverType := range []string{"macvlan", "ipvlan"} { - if driverType == "ipvlan" && isDebianRunc(podmanTest) { + if driverType == "ipvlan" && isDebian() { GinkgoWriter.Println("FIXME: Fails with netavark < 1.10. Re-enable once Debian gets an update") continue } diff --git a/test/e2e/container_inspect_test.go b/test/e2e/container_inspect_test.go index a174988edb..2a3861ff86 100644 --- a/test/e2e/container_inspect_test.go +++ b/test/e2e/container_inspect_test.go @@ -26,7 +26,7 @@ var _ = Describe("Podman container inspect", func() { It("podman inspect shows exposed ports", func() { name := "testcon" - session := podmanTest.Podman([]string{"run", "-d", "--stop-timeout", "0", "--expose", "8787/udp", "--name", name, ALPINE, "sleep", "inf"}) + session := podmanTest.Podman([]string{"run", "-d", "--stop-timeout", "0", "--expose", "8787/udp", "--name", name, ALPINE, "sleep", "100"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) data := podmanTest.InspectContainer(name) @@ -34,6 +34,11 @@ var _ = Describe("Podman container inspect", func() { Expect(data).To(HaveLen(1)) Expect(data[0].NetworkSettings.Ports). 
To(Equal(map[string][]define.InspectHostPort{"8787/udp": nil})) + + session = podmanTest.Podman([]string{"ps", "--format", "{{.Ports}}"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).To(Equal("8787/udp")) }) It("podman inspect shows exposed ports on image", func() { @@ -46,6 +51,11 @@ var _ = Describe("Podman container inspect", func() { Expect(data).To(HaveLen(1)) Expect(data[0].NetworkSettings.Ports). To(Equal(map[string][]define.InspectHostPort{"80/tcp": nil, "8989/tcp": nil})) + + session = podmanTest.Podman([]string{"ps", "--format", "{{.Ports}}"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).To(Equal("80/tcp, 8989/tcp")) }) It("podman inspect shows volumes-from with mount options", func() { diff --git a/test/e2e/containers_conf_test.go b/test/e2e/containers_conf_test.go index 56d7f7eb84..129c889bcf 100644 --- a/test/e2e/containers_conf_test.go +++ b/test/e2e/containers_conf_test.go @@ -441,8 +441,7 @@ var _ = Describe("Verify podman containers.conf usage", func() { It("--add-host and no-hosts=true fails", func() { session := podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("--no-hosts and --add-host cannot be set together")) + Expect(session).To(ExitWithError(125, "--no-hosts and --add-host cannot be set together")) session = podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", "--no-hosts=false", ALPINE, "top"}) session.WaitWithDefaultTimeout() @@ -533,8 +532,7 @@ var _ = Describe("Verify podman containers.conf usage", func() { if !IsRemote() { session = podmanTest.Podman([]string{"info", "--format", "{{.Store.ImageCopyTmpDir}}"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("invalid image_copy_tmp_dir value \"storage1\" (relative paths are not accepted)")) + Expect(session).Should(ExitWithError(125, `invalid image_copy_tmp_dir value "storage1" (relative paths are not accepted)`)) os.Setenv("TMPDIR", "/hoge") session = podmanTest.Podman([]string{"info", "--format", "{{.Store.ImageCopyTmpDir}}"}) @@ -573,18 +571,15 @@ var _ = Describe("Verify podman containers.conf usage", func() { result := podmanTest.Podman([]string{"pod", "create", "--infra-image", infra2}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring(error2String)) + Expect(result).Should(ExitWithError(125, error2String)) result = podmanTest.Podman([]string{"pod", "create"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring(errorString)) + Expect(result).Should(ExitWithError(125, errorString)) result = podmanTest.Podman([]string{"create", "--pod", "new:pod1", ALPINE}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring(errorString)) + Expect(result).Should(ExitWithError(125, errorString)) }) It("set .engine.remote=true", func() { @@ -679,8 +674,7 @@ var _ = Describe("Verify podman containers.conf usage", func() { podman.WaitWithDefaultTimeout() if mode == "invalid" { - Expect(podman).Should(Exit(125)) - Expect(podman.ErrorToString()).Should(ContainSubstring("invalid default_rootless_network_cmd option \"invalid\"")) + 
Expect(podman).Should(ExitWithError(125, `invalid default_rootless_network_cmd option "invalid"`)) continue } Expect(podman).Should(ExitCleanly()) diff --git a/test/e2e/cp_test.go b/test/e2e/cp_test.go index f63c778e13..4a2c7a5108 100644 --- a/test/e2e/cp_test.go +++ b/test/e2e/cp_test.go @@ -40,7 +40,7 @@ var _ = Describe("Podman cp", func() { // Cannot copy to a nonexistent path (note the trailing "/"). session = podmanTest.Podman([]string{"cp", srcFile.Name(), name + ":foo/"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `"foo/" could not be found on container`)) // The file will now be created (and written to). session = podmanTest.Podman([]string{"cp", srcFile.Name(), name + ":foo"}) diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go index df5aefc85f..fba85cbf72 100644 --- a/test/e2e/create_staticip_test.go +++ b/test/e2e/create_staticip_test.go @@ -1,12 +1,12 @@ package integration import ( + "fmt" "time" . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman create with --ip flag", func() { @@ -14,7 +14,7 @@ var _ = Describe("Podman create with --ip flag", func() { It("Podman create --ip with garbage address", func() { result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "114232346", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, `"114232346" is not an ip address`)) }) It("Podman create --ip with non-allocatable IP", func() { @@ -25,7 +25,7 @@ var _ = Describe("Podman create with --ip flag", func() { result = podmanTest.Podman([]string{"start", "test"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "requested static ip 203.0.113.124 not in any subnet on network podman")) }) It("Podman create with specified static IP has correct IP", func() { @@ -34,7 +34,7 @@ var _ = Describe("Podman create with --ip flag", func() { result.WaitWithDefaultTimeout() // Rootless static ip assignment without network should error if isRootless() { - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "invalid config provided: networks and static ip/mac address can only be used with Bridge mode networking")) } else { Expect(result).Should(ExitCleanly()) @@ -74,11 +74,6 @@ var _ = Describe("Podman create with --ip flag", func() { // test1 container is running with the given IP. result = podmanTest.Podman([]string{"start", "-a", "test2"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) - if podmanTest.NetworkBackend == CNI { - Expect(result.ErrorToString()).To(ContainSubstring("requested IP address %s is not available", ip)) - } else if podmanTest.NetworkBackend == Netavark { - Expect(result.ErrorToString()).To(ContainSubstring("requested ip address %s is already allocated", ip)) - } + Expect(result).To(ExitWithError(125, fmt.Sprintf("requested ip address %s is already allocated", ip))) }) }) diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go index 35b1f7cbe6..d020e16530 100644 --- a/test/e2e/create_staticmac_test.go +++ b/test/e2e/create_staticmac_test.go @@ -5,7 +5,6 @@ import ( "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run with --mac-address flag", func() { @@ -14,7 +13,7 @@ var _ = Describe("Podman run with --mac-address flag", func() { result := podmanTest.Podman([]string{"run", "--mac-address", "92:d0:c6:0a:29:34", ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() if isRootless() { - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "invalid config provided: networks and static ip/mac address can only be used with Bridge mode networking")) } else { Expect(result).Should(ExitCleanly()) Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:0a:29:34")) diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go index 6d5b82232b..646ff679d9 100644 --- a/test/e2e/create_test.go +++ b/test/e2e/create_test.go @@ -60,9 +60,8 @@ var _ = Describe("Podman create", func() { create := podmanTest.Podman([]string{"container", "create", pushedImage}) create.WaitWithDefaultTimeout() - Expect(create).Should(Exit(125)) + Expect(create).Should(ExitWithError(125, "http: server gave HTTP response to HTTPS client")) Expect(create.ErrorToString()).To(ContainSubstring("pinging container registry localhost:" + port)) - Expect(create.ErrorToString()).To(ContainSubstring("http: server gave HTTP response to HTTPS client")) create = podmanTest.Podman([]string{"create", "--tls-verify=false", pushedImage, "echo", "got here"}) create.WaitWithDefaultTimeout() @@ -85,7 +84,7 @@ var _ = Describe("Podman create", func() { session = podmanTest.Podman([]string{"create", "--name=foo", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `creating container storage: the container name "foo" is already in use by`)) }) It("podman create adds rdt-class", func() { @@ -121,7 +120,7 @@ var _ = Describe("Podman create", func() { result := podmanTest.Podman([]string{"inspect", "entrypoint_test", "--format", "{{.Config.Entrypoint}}"}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) - Expect(result.OutputToString()).To(Equal("/bin/foobar")) + Expect(result.OutputToString()).To(Equal("[/bin/foobar]")) }) It("podman create --entrypoint \"\"", func() { @@ -133,7 +132,7 @@ var _ = Describe("Podman create", func() { result := podmanTest.Podman([]string{"inspect", session.OutputToString(), "--format", "{{.Config.Entrypoint}}"}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) - Expect(result.OutputToString()).To(Equal("")) + Expect(result.OutputToString()).To(Equal("[]")) }) It("podman create --entrypoint json", func() { @@ -146,7 +145,7 @@ var _ = Describe("Podman create", func() { result := podmanTest.Podman([]string{"inspect", "entrypoint_json", "--format", "{{.Config.Entrypoint}}"}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) - Expect(result.OutputToString()).To(Equal("/bin/foo -c")) + Expect(result.OutputToString()).To(Equal("[/bin/foo -c]")) }) It("podman create --mount flag with multiple mounts", func() { @@ -227,13 +226,11 @@ var _ = Describe("Podman create", func() { // if used together. 
session := podmanTest.Podman([]string{"create", "--pod", "foo", "--pod-id-file", "bar", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - - tmpDir := GinkgoT().TempDir() + Expect(session).Should(ExitWithError(125, "cannot specify both --pod and --pod-id-file")) podName := "rudolph" ctrName := "prancer" - podIDFile := tmpDir + "pod-id-file" + podIDFile := filepath.Join(tempdir, "pod-id-file") // Now, let's create a pod with --pod-id-file. session = podmanTest.Podman([]string{"pod", "create", "--pod-id-file", podIDFile, "--name", podName}) @@ -266,13 +263,14 @@ var _ = Describe("Podman create", func() { Expect(ctrJSON).To(HaveLen(1)) Expect(ctrJSON[0].Config.Cmd).To(HaveLen(1)) Expect(ctrJSON[0].Config.Cmd[0]).To(Equal("redis-server")) - Expect(ctrJSON[0].Config).To(HaveField("Entrypoint", "docker-entrypoint.sh")) + Expect(ctrJSON[0].Config.Entrypoint).To(HaveLen(1)) + Expect(ctrJSON[0].Config.Entrypoint[0]).To(Equal("docker-entrypoint.sh")) }) It("podman create --pull", func() { session := podmanTest.Podman([]string{"create", "--pull", "never", "--name=foo", "testimage:00000000"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "testimage:00000000: image not known")) session = podmanTest.Podman([]string{"create", "--pull", "always", "--name=foo", "testimage:00000000"}) session.WaitWithDefaultTimeout() @@ -341,23 +339,23 @@ var _ = Describe("Podman create", func() { bogus := filepath.Join(podmanTest.TempDir, "bogus.conf") session := podmanTest.Podman([]string{"create", "--authfile", bogus, "--name=foo", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "credential file is not accessible: ")) Expect(session.ErrorToString()).To(ContainSubstring("no such file or directory")) }) It("podman create --signature-policy", func() { session := podmanTest.Podman([]string{"create", "--pull=always", "--signature-policy", "/no/such/file", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - - session = podmanTest.Podman([]string{"create", "-q", "--pull=always", "--signature-policy", "/etc/containers/policy.json", ALPINE}) - session.WaitWithDefaultTimeout() if IsRemote() { - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("unknown flag")) + Expect(session).To(ExitWithError(125, "unknown flag: --signature-policy")) + return } else { - Expect(session).Should(ExitCleanly()) + Expect(session).To(ExitWithError(125, "open /no/such/file: no such file or directory")) } + + session = podmanTest.Podman([]string{"create", "-q", "--pull=always", "--signature-policy", "/etc/containers/policy.json", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) }) It("podman create with unset label", func() { @@ -409,7 +407,7 @@ var _ = Describe("Podman create", func() { It("podman create with --restart-policy=always:5 fails", func() { session := podmanTest.Podman([]string{"create", "-t", "--restart", "always:5", ALPINE, "/bin/sh"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "restart policy retries can only be specified with on-failure restart policy")) }) It("podman create with --restart-policy unless-stopped", func() { @@ -461,7 +459,7 @@ var _ = Describe("Podman create", func() { // Make sure we error out with --name. 
session := podmanTest.Podman([]string{"create", "--replace", ALPINE, "/bin/sh"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "cannot replace container without --name being set")) // Create and replace 5 times in a row the "same" container. ctrName := "testCtr" @@ -482,21 +480,17 @@ var _ = Describe("Podman create", func() { inspect.WaitWithDefaultTimeout() data := inspect.InspectContainerToJSON() Expect(data).To(HaveLen(1)) - Expect(data[0].Config).To(HaveField("StopSignal", uint(15))) + Expect(data[0].Config).To(HaveField("StopSignal", "SIGTERM")) }) It("podman create --tz", func() { session := podmanTest.Podman([]string{"create", "--tz", "foo", "--name", "bad", ALPINE, "date"}) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(125)) - Expect(session.ErrorToString()).To( - Equal("Error: running container create option: finding timezone: unknown time zone foo")) + Expect(session).To(ExitWithError(125, "running container create option: finding timezone: unknown time zone foo")) session = podmanTest.Podman([]string{"create", "--tz", "America", "--name", "dir", ALPINE, "date"}) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(125)) - Expect(session.ErrorToString()).To( - Equal("Error: running container create option: finding timezone: is a directory")) + Expect(session).To(ExitWithError(125, "running container create option: finding timezone: is a directory")) session = podmanTest.Podman([]string{"create", "--tz", "Pacific/Honolulu", "--name", "zone", ALPINE, "date"}) session.WaitWithDefaultTimeout() @@ -554,8 +548,7 @@ var _ = Describe("Podman create", func() { session = podmanTest.Podman([]string{"create", "--umask", "9999", "--name", "bad", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("invalid umask")) + Expect(session).To(ExitWithError(125, "invalid umask string 9999: invalid argument")) }) It("create container in pod with IP should fail", func() { @@ -567,7 +560,7 @@ var _ = Describe("Podman create", func() { session := podmanTest.Podman([]string{"create", "--pod", name, "--ip", "192.168.1.2", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "invalid config provided: networks must be defined when the pod is created: network cannot be configured when it is shared with a pod")) }) It("create container in pod with mac should fail", func() { @@ -579,7 +572,7 @@ var _ = Describe("Podman create", func() { session := podmanTest.Podman([]string{"create", "--pod", name, "--mac-address", "52:54:00:6d:2f:82", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "invalid config provided: networks must be defined when the pod is created: network cannot be configured when it is shared with a pod")) }) It("create container in pod with network should not fail", func() { @@ -607,7 +600,7 @@ var _ = Describe("Podman create", func() { session := podmanTest.Podman([]string{"create", "--pod", name, "-p", "8086:80", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "invalid config provided: published or exposed ports must be defined when the pod is created: network cannot be configured when it is shared with a pod")) }) It("create container in pod publish ports should fail", func() { @@ 
-618,7 +611,7 @@ var _ = Describe("Podman create", func() { session := podmanTest.Podman([]string{"create", "--pod", name, "-P", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "invalid config provided: published or exposed ports must be defined when the pod is created: network cannot be configured when it is shared with a pod")) }) It("create use local store image if input image contains a manifest list", func() { @@ -642,32 +635,26 @@ var _ = Describe("Podman create", func() { It("podman create -d should fail, can not detach create containers", func() { session := podmanTest.Podman([]string{"create", "-d", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("unknown shorthand flag")) + Expect(session).Should(ExitWithError(125, "unknown shorthand flag: 'd' in -d")) session = podmanTest.Podman([]string{"create", "--detach", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("unknown flag")) + Expect(session).Should(ExitWithError(125, "unknown flag")) + Expect(session.ErrorToString()).To(ContainSubstring("unknown flag: --detach")) session = podmanTest.Podman([]string{"create", "--detach-keys", "ctrl-x", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("unknown flag")) + Expect(session).Should(ExitWithError(125, "unknown flag: --detach-keys")) }) It("podman create --platform", func() { session := podmanTest.Podman([]string{"create", "--platform=linux/bogus", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - expectedError := "no image found in manifest list for architecture bogus" - Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`)) session = podmanTest.Podman([]string{"create", "--platform=linux/arm64", "--os", "windows", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - expectedError = "--platform option can not be specified with --arch or --os" - Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + Expect(session).Should(ExitWithError(125, "--platform option can not be specified with --arch or --os")) session = podmanTest.Podman([]string{"create", "-q", "--platform=linux/arm64", ALPINE}) session.WaitWithDefaultTimeout() diff --git a/test/e2e/diff_test.go b/test/e2e/diff_test.go index 1db506c840..7507f35def 100644 --- a/test/e2e/diff_test.go +++ b/test/e2e/diff_test.go @@ -8,7 +8,6 @@ import ( "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman diff", func() { @@ -23,7 +22,7 @@ var _ = Describe("Podman diff", func() { It("podman diff bogus image", func() { session := podmanTest.Podman([]string{"diff", "1234"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "1234 not found: layer not known")) }) It("podman diff image with json output", func() { @@ -119,7 +118,7 @@ RUN echo test It("podman image diff bogus image", func() { session := podmanTest.Podman([]string{"image", "diff", "1234", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "1234 not found: 1234: image not known")) }) It("podman image diff of the same image", func() { @@ -166,4 +165,13 @@ RUN touch %s`, ALPINE, imagefile) Expect(session.OutputToString()).To(ContainSubstring(confile)) }) + It("podman diff without args", func() { + session := podmanTest.Podman([]string{"diff"}) + session.WaitWithDefaultTimeout() + if IsRemote() { + Expect(session).Should(ExitWithError(125, " requires a name or id")) + } else { + Expect(session).Should(ExitWithError(125, " requires a name, id, or the \"--latest\" flag")) + } + }) }) diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go index 4fb79ece72..63ca8f239c 100644 --- a/test/e2e/events_test.go +++ b/test/e2e/events_test.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/cmd/podman/system" . "github.com/containers/podman/v5/test/utils" "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" @@ -119,7 +119,10 @@ var _ = Describe("Podman events", func() { }) It("podman events format", func() { - _, ec, _ := podmanTest.RunLsContainer("") + start := time.Now() + ctrName := "testCtr" + _, ec, _ := podmanTest.RunLsContainer(ctrName) + end := time.Now() Expect(ec).To(Equal(0)) test := podmanTest.Podman([]string{"events", "--stream=false", "--format", "json"}) @@ -129,21 +132,34 @@ var _ = Describe("Podman events", func() { jsonArr := test.OutputToStringArray() Expect(test.OutputToStringArray()).ShouldNot(BeEmpty()) - event := events.Event{} + event := system.Event{} err := json.Unmarshal([]byte(jsonArr[0]), &event) Expect(err).ToNot(HaveOccurred()) - test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"}) + test = podmanTest.Podman([]string{ + "events", + "--stream=false", + "--since", strconv.FormatInt(start.Unix(), 10), + "--filter", fmt.Sprintf("container=%s", ctrName), + "--format", "{{json .}}", + }) + test.WaitWithDefaultTimeout() Expect(test).To(ExitCleanly()) jsonArr = test.OutputToStringArray() Expect(test.OutputToStringArray()).ShouldNot(BeEmpty()) - event = events.Event{} + event = system.Event{} err = json.Unmarshal([]byte(jsonArr[0]), &event) Expect(err).ToNot(HaveOccurred()) + Expect(event.Time).To(BeNumerically(">=", start.Unix())) + Expect(event.Time).To(BeNumerically("<=", end.Unix())) + Expect(event.TimeNano).To(BeNumerically(">=", start.UnixNano())) + Expect(event.TimeNano).To(BeNumerically("<=", end.UnixNano())) + Expect(time.Unix(0, event.TimeNano).Unix()).To(BeEquivalentTo(event.Time)) + test = podmanTest.Podman([]string{"events", "--stream=false", "--filter=type=container", "--format", "ID: {{.ID}}"}) test.WaitWithDefaultTimeout() Expect(test).To(ExitCleanly()) diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go index 43a9490abc..dcb7ff23f8 100644 --- a/test/e2e/exec_test.go +++ 
b/test/e2e/exec_test.go @@ -17,13 +17,7 @@ var _ = Describe("Podman exec", func() { It("podman exec into bogus container", func() { session := podmanTest.Podman([]string{"exec", "foobar", "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - }) - - It("podman exec without command", func() { - session := podmanTest.Podman([]string{"exec", "foobar"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman exec simple command", func() { @@ -34,6 +28,11 @@ var _ = Describe("Podman exec", func() { session := podmanTest.Podman([]string{"exec", "test1", "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + + // With no command + session = podmanTest.Podman([]string{"exec", "test1"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitWithError(125, "must provide a non-empty command to start an exec session: invalid argument")) }) It("podman container exec simple command", func() { @@ -96,7 +95,7 @@ var _ = Describe("Podman exec", func() { session := podmanTest.Podman([]string{"exec", "test1", "sh", "-c", "exit 100"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(100)) + Expect(session).Should(ExitWithError(100, "")) }) It("podman exec in keep-id container drops privileges", func() { @@ -399,13 +398,17 @@ var _ = Describe("Podman exec", func() { setup.WaitWithDefaultTimeout() Expect(setup).Should(ExitCleanly()) + expect := "chdir to `/missing`: No such file or directory" + if podmanTest.OCIRuntime == "runc" { + expect = "chdir to cwd" + } session := podmanTest.Podman([]string{"exec", "--workdir", "/missing", "test1", "pwd"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(127, expect)) session = podmanTest.Podman([]string{"exec", "-w", "/missing", "test1", "pwd"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(127, expect)) }) It("podman exec cannot be invoked", func() { @@ -416,13 +419,19 @@ var _ = Describe("Podman exec", func() { session := podmanTest.Podman([]string{"exec", "test1", "/etc"}) session.WaitWithDefaultTimeout() + // crun (and, we hope, any other future runtimes) + expectedStatus := 126 + expectedMessage := "open executable: Operation not permitted: OCI permission denied" + + // ...but it's much more complicated under runc (#19552) if podmanTest.OCIRuntime == "runc" { - // #19552 and others: some versions of runc exit 255. 
- Expect(session).Should(ExitWithError()) - } else { - // crun (and, we hope, any other future runtimes) - Expect(session).Should(Exit(126)) + expectedMessage = `exec failed: unable to start container process: exec: "/etc": is a directory` + expectedStatus = 255 + if IsRemote() { + expectedStatus = 125 + } } + Expect(session).Should(ExitWithError(expectedStatus, expectedMessage)) }) It("podman exec command not found", func() { @@ -432,7 +441,7 @@ var _ = Describe("Podman exec", func() { session := podmanTest.Podman([]string{"exec", "test1", "notthere"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(127)) + Expect(session).Should(ExitWithError(127, "OCI runtime attempted to invoke a command that was not found")) }) It("podman exec preserve fds sanity check", func() { @@ -559,8 +568,7 @@ RUN useradd -u 1000 auser`, fedoraMinimal) SkipIfRemote("not supported for --wait") session := podmanTest.Podman([]string{"exec", "--wait", "2", "1234"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(Equal("Error: timed out waiting for container: 1234")) + Expect(session).Should(ExitWithError(125, "timed out waiting for container: 1234")) }) It("podman exec --wait 5 seconds for started container", func() { diff --git a/test/e2e/exists_test.go b/test/e2e/exists_test.go index c86ca5f5fd..3da48b8088 100644 --- a/test/e2e/exists_test.go +++ b/test/e2e/exists_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman image|container exists", func() { @@ -22,7 +21,7 @@ var _ = Describe("Podman image|container exists", func() { It("podman image does not exist in local storage", func() { session := podmanTest.Podman([]string{"image", "exists", "alpine9999"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("podman container exists in local storage by name", func() { setup := podmanTest.RunTopContainer("foobar") @@ -56,7 +55,7 @@ var _ = Describe("Podman image|container exists", func() { It("podman container does not exist in local storage", func() { session := podmanTest.Podman([]string{"container", "exists", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("podman pod exists in local storage by name", func() { @@ -90,6 +89,6 @@ var _ = Describe("Podman image|container exists", func() { // The exit code for non-existing pod is incorrect (125 vs 1) session := podmanTest.Podman([]string{"pod", "exists", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) }) diff --git a/test/e2e/export_test.go b/test/e2e/export_test.go index 36ad540df4..efebd9f8ed 100644 --- a/test/e2e/export_test.go +++ b/test/e2e/export_test.go @@ -48,6 +48,6 @@ var _ = Describe("Podman export", func() { outfile := filepath.Join(podmanTest.TempDir, "container:with:colon.tar") result := podmanTest.Podman([]string{"export", "-o", outfile, cid}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "invalid filename (should not contain ':')")) }) }) diff --git a/test/e2e/farm_test.go b/test/e2e/farm_test.go index b676a92499..ede6d2d94d 100644 --- a/test/e2e/farm_test.go +++ b/test/e2e/farm_test.go @@ -180,13 +180,13 @@ farm2 [QA] false true cmd = 
[]string{"farm", "update", "--add", "no-node", "farm1"} session = podmanTest.Podman(cmd) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, `cannot add to farm, "no-node" is not a system connection`)) // update farm2 to remove node not in farm connections from it cmd = []string{"farm", "update", "--remove", "QB", "farm2"} session = podmanTest.Podman(cmd) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, `cannot remove from farm, "QB" is not a connection in the farm`)) // check again to ensure that nothing has changed session = podmanTest.Podman(farmListCmd) @@ -209,13 +209,13 @@ farm2 [QA] false true cmd = []string{"farm", "update", "--add", "no-node", "non-existent"} session = podmanTest.Podman(cmd) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, `cannot update farm, "non-existent" farm doesn't exist`)) // update non-existent farm to default cmd = []string{"farm", "update", "--default", "non-existent"} session = podmanTest.Podman(cmd) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, `cannot update farm, "non-existent" farm doesn't exist`)) session = podmanTest.Podman(farmListCmd) session.WaitWithDefaultTimeout() diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index 05396c1249..bb8800a8db 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -15,22 +15,21 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" "sigs.k8s.io/yaml" ) var _ = Describe("Podman kube generate", func() { It("pod on bogus object", func() { - session := podmanTest.Podman([]string{"generate", "kube", "foobar"}) + session := podmanTest.Podman([]string{"generate", "kube", "foobarpod"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `name or ID "foobarpod" not found`)) }) It("service on bogus object", func() { - session := podmanTest.Podman([]string{"kube", "generate", "-s", "foobar"}) + session := podmanTest.Podman([]string{"kube", "generate", "-s", "foobarservice"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `name or ID "foobarservice" not found`)) }) It("on container", func() { @@ -184,7 +183,7 @@ var _ = Describe("Podman kube generate", func() { err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec).To(HaveField("HostNetwork", false)) - Expect(pod.Annotations).To(BeEmpty()) + Expect(pod.Annotations).To(HaveLen(1)) numContainers := 0 for range pod.Spec.Containers { @@ -390,7 +389,7 @@ var _ = Describe("Podman kube generate", func() { ctrSession := podmanTest.Podman([]string{"create", "--name", "testCtr", "--pod", podName, "-p", "9000:8000", CITEST_IMAGE, "top"}) ctrSession.WaitWithDefaultTimeout() - Expect(ctrSession).Should(Exit(125)) + Expect(ctrSession).Should(ExitWithError(125, "invalid config provided: published or exposed ports must be defined when the pod is created: network cannot be configured when it is shared with a pod")) // Ports without Net sharing should work with ports being set for each container in the generated kube yaml podName = "testNet" @@ -431,7 +430,7 @@ var _ = Describe("Podman kube generate", 
func() { ctrSession := podmanTest.Podman([]string{"create", "--name", "testCtr", "--pod", podName, "--hostname", "test-hostname", CITEST_IMAGE, "top"}) ctrSession.WaitWithDefaultTimeout() - Expect(ctrSession).Should(Exit(125)) + Expect(ctrSession).Should(ExitWithError(125, "invalid config provided: cannot set hostname when joining the pod UTS namespace: invalid configuration")) // Hostname without uts sharing should work, but generated kube yaml will have pod hostname // set to the hostname of the first container @@ -668,11 +667,11 @@ var _ = Describe("Podman kube generate", func() { It("on pod with ports", func() { podName := "test" - lock4 := GetPortLock("4000") + lock4 := GetPortLock("4008") defer lock4.Unlock() - lock5 := GetPortLock("5000") + lock5 := GetPortLock("5008") defer lock5.Unlock() - podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "-p", "4000:4000", "-p", "5000:5000"}) + podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "-p", "4008:4000", "-p", "5008:5000"}) podSession.WaitWithDefaultTimeout() Expect(podSession).Should(ExitCleanly()) @@ -694,8 +693,8 @@ var _ = Describe("Podman kube generate", func() { err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).ToNot(HaveOccurred()) - foundPort4000 := 0 - foundPort5000 := 0 + foundPort400x := 0 + foundPort500x := 0 foundOtherPort := 0 for _, ctr := range pod.Spec.Containers { for _, port := range ctr.Ports { @@ -703,17 +702,17 @@ var _ = Describe("Podman kube generate", func() { // have anything for protocol under the ports as tcp is the default // for k8s Expect(port.Protocol).To(BeEmpty()) - if port.HostPort == 4000 { - foundPort4000++ - } else if port.HostPort == 5000 { - foundPort5000++ + if port.HostPort == 4008 { + foundPort400x++ + } else if port.HostPort == 5008 { + foundPort500x++ } else { foundOtherPort++ } } } - Expect(foundPort4000).To(Equal(1)) - Expect(foundPort5000).To(Equal(1)) + Expect(foundPort400x).To(Equal(1)) + Expect(foundPort500x).To(Equal(1)) Expect(foundOtherPort).To(Equal(0)) // Create container with UDP port and check the generated kube yaml @@ -955,7 +954,7 @@ var _ = Describe("Podman kube generate", func() { kube := podmanTest.Podman([]string{"kube", "generate", "top"}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, " is associated with pod ")) }) It("with multiple containers", func() { @@ -983,7 +982,7 @@ var _ = Describe("Podman kube generate", func() { kube := podmanTest.Podman([]string{"kube", "generate", "top1", "top2"}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, " is associated with pod ")) }) It("on a container with dns options", func() { @@ -1522,7 +1521,7 @@ USER test1` kube := podmanTest.Podman([]string{"kube", "generate", "--type", "pod", "--replicas", "3", ctrName}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) + Expect(kube).Should(ExitWithError(125, "--replicas can only be set when --type is set to deployment")) }) It("on pod with --type=deployment and --restart=no should fail", func() { @@ -1537,7 +1536,7 @@ USER test1` kube := podmanTest.Podman([]string{"kube", "generate", "--type", "deployment", podName}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) + Expect(kube).Should(ExitWithError(125, "k8s Deployments can only have restartPolicy set to Always")) }) It("on pod with invalid name", func() { @@ -1562,117 +1561,6 @@ USER test1` Expect(pod.Spec.Hostname).To(Equal("")) }) - 
It("--no-trunc on container with long annotation", func() { - ctrName := "demo" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) - Expect(err).ToNot(HaveOccurred()) - - session := podmanTest.Podman([]string{"create", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - kube := podmanTest.Podman([]string{"kube", "generate", "--no-trunc", ctrName}) - kube.WaitWithDefaultTimeout() - Expect(kube).Should(ExitCleanly()) - - pod := new(v1.Pod) - err = yaml.Unmarshal(kube.Out.Contents(), pod) - Expect(err).ToNot(HaveOccurred()) - - Expect(pod.Annotations).To(HaveKeyWithValue(define.BindMountPrefix, vol1+":Z")) - Expect(pod.Annotations).To(Not(HaveKeyWithValue(define.BindMountPrefix, vol1[:define.MaxKubeAnnotation]))) - }) - - It("on container with long annotation", func() { - ctrName := "demo" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) - Expect(err).ToNot(HaveOccurred()) - - session := podmanTest.Podman([]string{"create", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - kube := podmanTest.Podman([]string{"kube", "generate", ctrName}) - kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(0)) - if IsRemote() { - Expect(kube.ErrorToString()).To(BeEmpty()) - } else { - Expect(kube.ErrorToString()).To(ContainSubstring("Truncation Annotation:")) - Expect(kube.ErrorToString()).To(ContainSubstring("Kubernetes only allows 63 characters")) - } - - pod := new(v1.Pod) - err = yaml.Unmarshal(kube.Out.Contents(), pod) - Expect(err).ToNot(HaveOccurred()) - - Expect(pod.Annotations).To(HaveKeyWithValue(define.BindMountPrefix, vol1[:define.MaxKubeAnnotation])) - Expect(pod.Annotations).To(Not(HaveKeyWithValue(define.BindMountPrefix, vol1+":Z"))) - }) - - It("--no-trunc on pod with long annotation", func() { - ctrName := "demoCtr" - podName := "demoPod" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) - Expect(err).ToNot(HaveOccurred()) - - session := podmanTest.Podman([]string{"pod", "create", podName}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"create", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, "--pod", podName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - kube := podmanTest.Podman([]string{"kube", "generate", "--no-trunc", podName}) - kube.WaitWithDefaultTimeout() - Expect(kube).Should(ExitCleanly()) - - pod := new(v1.Pod) - err = yaml.Unmarshal(kube.Out.Contents(), pod) - Expect(err).ToNot(HaveOccurred()) - - Expect(pod.Annotations).To(HaveKeyWithValue(define.BindMountPrefix, vol1+":Z")) - Expect(pod.Annotations).To(Not(HaveKeyWithValue(define.BindMountPrefix, vol1[:define.MaxKubeAnnotation]))) - }) - - It("on pod with long annotation", func() { - ctrName := "demoCtr" - podName := "demoPod" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) - Expect(err).ToNot(HaveOccurred()) - - session := podmanTest.Podman([]string{"pod", "create", podName}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"create", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, "--pod", podName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) 
- - kube := podmanTest.Podman([]string{"kube", "generate", podName}) - kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(0)) - - if IsRemote() { - Expect(kube.ErrorToString()).To(BeEmpty()) - } else { - Expect(kube.ErrorToString()).To(ContainSubstring("Truncation Annotation:")) - Expect(kube.ErrorToString()).To(ContainSubstring("Kubernetes only allows 63 characters")) - } - - pod := new(v1.Pod) - err = yaml.Unmarshal(kube.Out.Contents(), pod) - Expect(err).ToNot(HaveOccurred()) - - Expect(pod.Annotations).To(HaveKeyWithValue(define.BindMountPrefix, vol1[:define.MaxKubeAnnotation])) - Expect(pod.Annotations).To(Not(HaveKeyWithValue(define.BindMountPrefix, vol1+":Z"))) - }) - It("--podman-only on container with --volumes-from", func() { ctr1 := "ctr1" ctr2 := "ctr2" @@ -1930,7 +1818,7 @@ EXPOSE 2004-2005/tcp`, CITEST_IMAGE) pod := new(v1.Pod) err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).ToNot(HaveOccurred()) - Expect(pod.Annotations).To(BeEmpty()) + Expect(pod.Annotations).To(HaveLen(1)) }) It("on pod with --stop-timeout set for ctr", func() { @@ -1988,8 +1876,7 @@ EXPOSE 2004-2005/tcp`, CITEST_IMAGE) kube := podmanTest.Podman([]string{"kube", "generate", "--type", "daemonset", "--replicas", "3", ctrName}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("--replicas can only be set when --type is set to deployment")) + Expect(kube).Should(ExitWithError(125, "--replicas can only be set when --type is set to deployment")) }) It("on pod with --type=daemonset and --restart=no should fail", func() { @@ -2004,7 +1891,6 @@ EXPOSE 2004-2005/tcp`, CITEST_IMAGE) kube := podmanTest.Podman([]string{"kube", "generate", "--type", "daemonset", podName}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("k8s DaemonSets can only have restartPolicy set to Always")) + Expect(kube).Should(ExitWithError(125, "k8s DaemonSets can only have restartPolicy set to Always")) }) }) diff --git a/test/e2e/generate_spec_test.go b/test/e2e/generate_spec_test.go index 8fa378393e..b4641ae57c 100644 --- a/test/e2e/generate_spec_test.go +++ b/test/e2e/generate_spec_test.go @@ -18,7 +18,7 @@ var _ = Describe("Podman generate spec", func() { It("podman generate spec bogus should fail", func() { session := podmanTest.Podman([]string{"generate", "spec", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "could not find a pod or container with the id foobar")) }) It("podman generate spec basic usage", func() { diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go index 824cf65132..81483223f5 100644 --- a/test/e2e/generate_systemd_test.go +++ b/test/e2e/generate_systemd_test.go @@ -2,6 +2,7 @@ package integration import ( "os" + "path/filepath" "strings" . 
"github.com/containers/podman/v5/test/utils" @@ -15,19 +16,13 @@ var _ = Describe("Podman generate systemd", func() { It("podman generate systemd on bogus container/pod", func() { session := podmanTest.Podman([]string{"generate", "systemd", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - }) - - It("podman generate systemd bad restart policy", func() { - session := podmanTest.Podman([]string{"generate", "systemd", "--restart-policy", "never", "foobar"}) - session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `foobar does not refer to a container or pod: no pod with name or ID foobar found: no such pod: no container with name or ID "foobar" found: no such container`)) }) It("podman generate systemd bad timeout value", func() { session := podmanTest.Podman([]string{"generate", "systemd", "--time", "-1", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `invalid argument "-1" for "-t, --time" flag: strconv.ParseUint: parsing "-1": invalid syntax`)) }) It("podman generate systemd bad restart-policy value", func() { @@ -37,8 +32,7 @@ var _ = Describe("Podman generate systemd", func() { session = podmanTest.Podman([]string{"generate", "systemd", "--restart-policy", "bogus", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("bogus is not a valid restart policy")) + Expect(session).To(ExitWithError(125, "bogus is not a valid restart policy")) }) It("podman generate systemd with --no-header=true", func() { @@ -224,13 +218,11 @@ var _ = Describe("Podman generate systemd", func() { // Fail for the pod session = podmanTest.Podman([]string{"generate", "systemd", "foo"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("cannot generate systemd units for init containers")) + Expect(session).Should(ExitWithError(125, "cannot generate systemd units for init containers")) // Fail for the init container session = podmanTest.Podman([]string{"generate", "systemd", "foo-init"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("cannot generate systemd units for init containers")) + Expect(session).Should(ExitWithError(125, "cannot generate systemd units for init containers")) }) It("podman generate systemd pod --name --files", func() { @@ -544,8 +536,7 @@ var _ = Describe("Podman generate systemd", func() { }) It("podman generate systemd pod with containers --new", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "podID" + tmpFile := filepath.Join(tempdir, "podID") n := podmanTest.Podman([]string{"pod", "create", "--pod-id-file", tmpFile, "--name", "foo"}) n.WaitWithDefaultTimeout() @@ -628,8 +619,7 @@ var _ = Describe("Podman generate systemd", func() { session = podmanTest.Podman([]string{"generate", "systemd", "--env", "=bar", "-e", "hoge=fuga", "test"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("invalid variable")) + Expect(session).Should(ExitWithError(125, "invalid variable")) // Use -e/--env option with --new option session = podmanTest.Podman([]string{"generate", "systemd", "--env", "foo=bar", "-e", "hoge=fuga", "--new", "test"}) @@ -640,8 +630,7 @@ var _ = Describe("Podman generate systemd", func() { session = 
podmanTest.Podman([]string{"generate", "systemd", "--env", "foo=bar", "-e", "=fuga", "--new", "test"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("invalid variable")) + Expect(session).Should(ExitWithError(125, "invalid variable")) // Escape systemd arguments session = podmanTest.Podman([]string{"generate", "systemd", "--env", "BAR=my test", "-e", "USER=%a", "test"}) diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go index 1c44c35ad3..b219b00f7c 100644 --- a/test/e2e/healthcheck_run_test.go +++ b/test/e2e/healthcheck_run_test.go @@ -4,13 +4,13 @@ import ( "fmt" "os" "path/filepath" + "strconv" "time" "github.com/containers/podman/v5/libpod/define" . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman healthcheck run", func() { @@ -18,7 +18,7 @@ var _ = Describe("Podman healthcheck run", func() { It("podman healthcheck run bogus container", func() { session := podmanTest.Podman([]string{"healthcheck", "run", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `unable to look up foobar to perform a health check: no container with name or ID "foobar" found: no such container`)) }) It("podman disable healthcheck with --no-healthcheck on valid container", func() { @@ -27,7 +27,7 @@ var _ = Describe("Podman healthcheck run", func() { Expect(session).Should(ExitCleanly()) hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(125)) + Expect(hc).Should(ExitWithError(125, "has no defined healthcheck")) }) It("podman disable healthcheck with --no-healthcheck must not show starting on status", func() { @@ -71,7 +71,7 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Healthcheck}}", "hc"}) hc.WaitWithDefaultTimeout() Expect(hc).Should(ExitCleanly()) - Expect(hc.OutputToString()).To(Equal("{[CMD-SHELL curl -f http://localhost/ || exit 1] 0s 5m0s 3s 0}")) + Expect(hc.OutputToString()).To(Equal("{[CMD-SHELL curl -f http://localhost/ || exit 1] 0s 0s 5m0s 3s 0}")) }) It("podman disable healthcheck with --health-cmd=none on valid container", func() { @@ -80,7 +80,7 @@ var _ = Describe("Podman healthcheck run", func() { Expect(session).Should(ExitCleanly()) hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(125)) + Expect(hc).Should(ExitWithError(125, "has no defined healthcheck")) }) It("podman healthcheck on valid container", func() { @@ -116,7 +116,7 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) }) It("podman healthcheck on stopped container", func() { @@ -126,7 +126,7 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(125)) + Expect(hc).Should(ExitWithError(125, "is not running")) }) It("podman healthcheck on container without healthcheck", func() { @@ -136,7 +136,7 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - 
Expect(hc).Should(Exit(125)) + Expect(hc).Should(ExitWithError(125, "has no defined healthcheck")) }) It("podman healthcheck should be starting", func() { @@ -154,15 +154,15 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) inspect := podmanTest.InspectContainer("hc") Expect(inspect[0].State.Health).To(HaveField("Status", "starting")) @@ -177,14 +177,14 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) inspect := podmanTest.InspectContainer("hc") Expect(inspect[0].State.Health).To(HaveField("Status", "starting")) hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) inspect = podmanTest.InspectContainer("hc") Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckUnhealthy)) @@ -212,56 +212,78 @@ var _ = Describe("Podman healthcheck run", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) }) - It("podman healthcheck single healthy result changes failed to healthy", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) + // Run this test with and without healthcheck events, even without events + // podman inspect and ps should still show accurate healthcheck results. 
+ for _, hcEvent := range []bool{true, false} { + hcEvent := hcEvent + testName := "hc_events=" + strconv.FormatBool(hcEvent) + It("podman healthcheck single healthy result changes failed to healthy "+testName, func() { + if !hcEvent { + path := filepath.Join(podmanTest.TempDir, "containers.conf") + err := os.WriteFile(path, []byte("[engine]\nhealthcheck_events=false\n"), 0o644) + Expect(err).ToNot(HaveOccurred()) + err = os.Setenv("CONTAINERS_CONF_OVERRIDE", path) + Expect(err).ToNot(HaveOccurred()) + if IsRemote() { + podmanTest.StopRemoteService() + podmanTest.StartRemoteService() + } + } - hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) - hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) - inspect := podmanTest.InspectContainer("hc") - Expect(inspect[0].State.Health).To(HaveField("Status", "starting")) - - hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) - hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc).Should(ExitWithError(1, "")) - inspect = podmanTest.InspectContainer("hc") - Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckUnhealthy)) + inspect := podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Health).To(HaveField("Status", "starting")) - foo := podmanTest.Podman([]string{"exec", "hc", "touch", "/foo"}) - foo.WaitWithDefaultTimeout() - Expect(foo).Should(ExitCleanly()) + hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc).Should(ExitWithError(1, "")) - hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) - hc.WaitWithDefaultTimeout() - Expect(hc).Should(ExitCleanly()) + inspect = podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckUnhealthy)) - inspect = podmanTest.InspectContainer("hc") - Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckHealthy)) + foo := podmanTest.Podman([]string{"exec", "hc", "touch", "/foo"}) + foo.WaitWithDefaultTimeout() + Expect(foo).Should(ExitCleanly()) - // Test that events generated have correct status (#19237) - events := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status", "--since", "1m"}) - events.WaitWithDefaultTimeout() - Expect(events).Should(ExitCleanly()) - eventsOut := events.OutputToStringArray() - Expect(eventsOut).To(HaveLen(3)) - Expect(eventsOut[0]).To(ContainSubstring("health_status=starting")) - Expect(eventsOut[1]).To(ContainSubstring("health_status=unhealthy")) - Expect(eventsOut[2]).To(ContainSubstring("health_status=healthy")) + hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc).Should(ExitCleanly()) + + inspect = podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckHealthy)) + + // Test that events generated have correct status (#19237) + events := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status", "--since", "1m"}) + events.WaitWithDefaultTimeout() + Expect(events).Should(ExitCleanly()) + if hcEvent { + eventsOut := events.OutputToStringArray() + Expect(eventsOut).To(HaveLen(3)) + 
Expect(eventsOut[0]).To(ContainSubstring("health_status=starting")) + Expect(eventsOut[1]).To(ContainSubstring("health_status=unhealthy")) + Expect(eventsOut[2]).To(ContainSubstring("health_status=healthy")) + } else { + Expect(events.OutputToString()).To(BeEmpty()) + } - // Test podman ps --filter health is working (#11687) - ps := podmanTest.Podman([]string{"ps", "--filter", "health=healthy"}) - ps.WaitWithDefaultTimeout() - Expect(ps).Should(ExitCleanly()) - Expect(ps.OutputToStringArray()).To(HaveLen(2)) - Expect(ps.OutputToString()).To(ContainSubstring("hc")) - }) + // Test podman ps --filter health is working (#11687) + ps := podmanTest.Podman([]string{"ps", "--filter", "health=healthy"}) + ps.WaitWithDefaultTimeout() + Expect(ps).Should(ExitCleanly()) + Expect(ps.OutputToStringArray()).To(HaveLen(2)) + Expect(ps.OutputToString()).To(ContainSubstring("hc")) + }) + } It("hc logs do not include exec events", func() { session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-cmd", "true", "--health-interval", "5s", "alpine", "sleep", "60"}) @@ -343,7 +365,7 @@ HEALTHCHECK CMD ls -l / 2>&1`, ALPINE) hc := podmanTest.Podman([]string{"healthcheck", "run", ctrName}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) exec := podmanTest.Podman([]string{"exec", ctrName, "sh", "-c", "touch /test && echo startup > /test"}) exec.WaitWithDefaultTimeout() diff --git a/test/e2e/image_scp_test.go b/test/e2e/image_scp_test.go index 5dd1a3a54d..e19da2d09e 100644 --- a/test/e2e/image_scp_test.go +++ b/test/e2e/image_scp_test.go @@ -17,7 +17,7 @@ var _ = Describe("podman image scp", func() { It("podman image scp bogus image", func() { scp := podmanTest.Podman([]string{"image", "scp", "FOOBAR"}) scp.WaitWithDefaultTimeout() - Expect(scp).Should(ExitWithError()) + Expect(scp).Should(ExitWithError(125, "must specify a destination: invalid argument")) }) It("podman image scp with proper connection", func() { @@ -38,12 +38,7 @@ var _ = Describe("podman image scp", func() { // exit with error because we cannot make an actual ssh connection // This tests that the input we are given is validated and prepared correctly // The error given should either be a missing image (due to testing suite complications) or a no such host timeout on ssh - Expect(scp).Should(ExitWithError()) - // podman-remote exits with a different error - if !IsRemote() { - Expect(scp.ErrorToString()).Should(ContainSubstring("no such host")) - } - + Expect(scp).Should(ExitWithError(125, "failed to connect: dial tcp: lookup ")) }) }) diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go index 63a029f197..ddde5a27bc 100644 --- a/test/e2e/images_test.go +++ b/test/e2e/images_test.go @@ -217,8 +217,8 @@ WORKDIR /test podmanTest.BuildImage(dockerfile, "foobar.com/before:latest", "false") result := podmanTest.Podman([]string{"images", "-q", "-f", "dangling=true"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(0), "dangling image output: %q", result.OutputToString()) - Expect(result.OutputToStringArray()).Should(BeEmpty(), "dangling image output: %q", result.OutputToString()) + Expect(result).Should(ExitCleanly()) + Expect(result.OutputToStringArray()).Should(BeEmpty(), "dangling image output") }) It("podman images filter intermediate", func() { diff --git a/test/e2e/import_test.go b/test/e2e/import_test.go index e22b819912..fb85182e8f 100644 --- a/test/e2e/import_test.go +++ b/test/e2e/import_test.go @@ -171,16 +171,10 @@ var _ = Describe("Podman import", 
func() { importImage := podmanTest.Podman([]string{"import", "-q", "--signature-policy", "/no/such/file", outfile}) importImage.WaitWithDefaultTimeout() - Expect(importImage).To(ExitWithError()) + Expect(importImage).To(ExitWithError(125, "open /no/such/file: no such file or directory")) result := podmanTest.Podman([]string{"import", "-q", "--signature-policy", "/etc/containers/policy.json", outfile}) result.WaitWithDefaultTimeout() - if IsRemote() { - Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring("unknown flag")) - result := podmanTest.Podman([]string{"import", "-q", outfile}) - result.WaitWithDefaultTimeout() - } Expect(result).Should(ExitCleanly()) }) }) diff --git a/test/e2e/info_test.go b/test/e2e/info_test.go index ef009cb22a..5bf028a207 100644 --- a/test/e2e/info_test.go +++ b/test/e2e/info_test.go @@ -83,10 +83,11 @@ var _ = Describe("Podman Info", func() { rootlessStoragePath := `"/tmp/$HOME/$USER/$UID/storage"` driver := `"overlay"` - storageOpt := `"/usr/bin/fuse-overlayfs"` - storageConf := []byte(fmt.Sprintf("[storage]\ndriver=%s\nrootless_storage_path=%s\n[storage.options]\nmount_program=%s", driver, rootlessStoragePath, storageOpt)) + storageConf := []byte(fmt.Sprintf("[storage]\ndriver=%s\nrootless_storage_path=%s\n[storage.options]\n", driver, rootlessStoragePath)) err = os.WriteFile(configPath, storageConf, os.ModePerm) Expect(err).ToNot(HaveOccurred()) + // Failures in this test are impossible to debug without breadcrumbs + GinkgoWriter.Printf("CONTAINERS_STORAGE_CONF=%s:\n%s\n", configPath, storageConf) u, err := user.Current() Expect(err).ToNot(HaveOccurred()) @@ -96,8 +97,9 @@ var _ = Describe("Podman Info", func() { podmanPath := podmanTest.PodmanTest.PodmanBinary cmd := exec.Command(podmanPath, "info", "--format", "{{.Store.GraphRoot -}}") out, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) - Expect(string(out)).To(Equal(expect)) + GinkgoWriter.Printf("Running: podman info --format {{.Store.GraphRoot -}}\nOutput: %s\n", string(out)) + Expect(err).ToNot(HaveOccurred(), "podman info") + Expect(string(out)).To(Equal(expect), "output from podman info") }) It("check RemoteSocket ", func() { @@ -222,8 +224,7 @@ var _ = Describe("Podman Info", func() { // make sure we get an error for bogus values session := podmanTest.Podman([]string{"--db-backend", "bogus", "info", "--format", "{{.Host.DatabaseBackend}}"}) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(125)) - Expect(session.ErrorToString()).To(Equal("Error: unsupported database backend: \"bogus\"")) + Expect(session).To(ExitWithError(125, `Error: unsupported database backend: "bogus"`)) }) It("Podman info: check desired storage driver", func() { diff --git a/test/e2e/init_test.go b/test/e2e/init_test.go index 5c8cb14f34..dae57b6c88 100644 --- a/test/e2e/init_test.go +++ b/test/e2e/init_test.go @@ -1,10 +1,11 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman init", func() { @@ -12,13 +13,13 @@ var _ = Describe("Podman init", func() { It("podman init bogus container", func() { session := podmanTest.Podman([]string{"start", "123456"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `Error: no container with name or ID "123456" found: no such container`)) }) It("podman init with no arguments", func() { session := podmanTest.Podman([]string{"start"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "Error: start requires at least one argument")) }) It("podman init single container by ID", func() { @@ -110,8 +111,10 @@ var _ = Describe("Podman init", func() { session := podmanTest.Podman([]string{"run", "--name", "init_test", "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + cid := session.OutputToString() + init := podmanTest.Podman([]string{"init", "init_test"}) init.WaitWithDefaultTimeout() - Expect(init).Should(Exit(125)) + Expect(init).Should(ExitWithError(125, fmt.Sprintf("Error: container %s has already been created in runtime: container state improper", cid))) }) }) diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go index 637535f9a3..202ca036ae 100644 --- a/test/e2e/inspect_test.go +++ b/test/e2e/inspect_test.go @@ -1,10 +1,11 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" "github.com/opencontainers/selinux/go-selinux" ) @@ -22,7 +23,7 @@ var _ = Describe("Podman inspect", func() { It("podman inspect bogus container", func() { session := podmanTest.Podman([]string{"inspect", "foobar4321"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no such object: "foobar4321"`)) }) It("podman inspect filter should work if result contains tab", func() { @@ -128,7 +129,7 @@ var _ = Describe("Podman inspect", func() { SkipIfRemote("--latest flag n/a") result := podmanTest.Podman([]string{"inspect", "-l", "1234foobar"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "--latest and arguments cannot be used together")) }) It("podman inspect with mount filters", func() { @@ -173,7 +174,7 @@ var _ = Describe("Podman inspect", func() { session := podmanTest.Podman([]string{"inspect", "--latest"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "no containers to inspect: no such container")) }) It("podman [image,container] inspect on image", func() { @@ -185,7 +186,11 @@ var _ = Describe("Podman inspect", func() { ctrInspect := podmanTest.Podman([]string{"container", "inspect", ALPINE}) ctrInspect.WaitWithDefaultTimeout() - Expect(ctrInspect).To(ExitWithError()) + if IsRemote() { + Expect(ctrInspect).To(ExitWithError(125, fmt.Sprintf("no such container %q", ALPINE))) + } else { + Expect(ctrInspect).To(ExitWithError(125, fmt.Sprintf("no such container %s", ALPINE))) + } imageInspect := podmanTest.Podman([]string{"image", "inspect", ALPINE}) imageInspect.WaitWithDefaultTimeout() @@ -197,7 +202,7 @@ var _ = Describe("Podman inspect", func() { }) It("podman [image, container] inspect on container", func() { - ctrName := "testCtr" + ctrName := "testctr" create := podmanTest.Podman([]string{"create", 
"--name", ctrName, ALPINE, "sh"}) create.WaitWithDefaultTimeout() Expect(create).Should(ExitCleanly()) @@ -216,7 +221,7 @@ var _ = Describe("Podman inspect", func() { imageInspect := podmanTest.Podman([]string{"image", "inspect", ctrName}) imageInspect.WaitWithDefaultTimeout() - Expect(imageInspect).To(ExitWithError()) + Expect(imageInspect).To(ExitWithError(125, fmt.Sprintf("%s: image not known", ctrName))) Expect(baseJSON[0]).To(HaveField("ID", ctrJSON[0].ID)) }) @@ -224,7 +229,7 @@ var _ = Describe("Podman inspect", func() { It("podman inspect always produces a valid array", func() { baseInspect := podmanTest.Podman([]string{"inspect", "doesNotExist"}) baseInspect.WaitWithDefaultTimeout() - Expect(baseInspect).To(ExitWithError()) + Expect(baseInspect).To(ExitWithError(125, `no such object: "doesNotExist"`)) emptyJSON := baseInspect.InspectContainerToJSON() Expect(emptyJSON).To(BeEmpty()) }) @@ -237,7 +242,7 @@ var _ = Describe("Podman inspect", func() { baseInspect := podmanTest.Podman([]string{"inspect", ctrName, "doesNotExist"}) baseInspect.WaitWithDefaultTimeout() - Expect(baseInspect).To(ExitWithError()) + Expect(baseInspect).To(ExitWithError(125, `no such object: "doesNotExist"`)) baseJSON := baseInspect.InspectContainerToJSON() Expect(baseJSON).To(HaveLen(1)) Expect(baseJSON[0]).To(HaveField("Name", ctrName)) @@ -383,6 +388,7 @@ var _ = Describe("Podman inspect", func() { Expect(session).Should(ExitCleanly()) Expect(session.OutputToString()).To(Equal(volName)) }) + It("podman inspect --type container on a pod should fail", func() { podName := "testpod" create := podmanTest.Podman([]string{"pod", "create", "--name", podName}) @@ -391,7 +397,11 @@ var _ = Describe("Podman inspect", func() { inspect := podmanTest.Podman([]string{"inspect", "--type", "container", podName}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) + if IsRemote() { + Expect(inspect).To(ExitWithError(125, fmt.Sprintf("no such container %q", podName))) + } else { + Expect(inspect).To(ExitWithError(125, fmt.Sprintf("no such container %s", podName))) + } }) It("podman inspect --type network on a container should fail", func() { @@ -402,7 +412,7 @@ var _ = Describe("Podman inspect", func() { inspect := podmanTest.Podman([]string{"inspect", "--type", "network", ctrName}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) + Expect(inspect).To(ExitWithError(125, " network not found")) }) It("podman inspect --type pod on a container should fail", func() { @@ -413,7 +423,7 @@ var _ = Describe("Podman inspect", func() { inspect := podmanTest.Podman([]string{"inspect", "--type", "pod", ctrName}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) + Expect(inspect).To(ExitWithError(125, "no such pod ")) }) It("podman inspect --type volume on a container should fail", func() { @@ -424,7 +434,7 @@ var _ = Describe("Podman inspect", func() { inspect := podmanTest.Podman([]string{"inspect", "--type", "volume", ctrName}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) + Expect(inspect).To(ExitWithError(125, "no such volume ")) }) // Fixes https://github.com/containers/podman/issues/8444 @@ -438,7 +448,7 @@ var _ = Describe("Podman inspect", func() { inspect := podmanTest.Podman([]string{"inspect", `--format="{{json .NetworkSettings.Ports}}"`, ctnrName}) inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) - Expect(inspect.OutputToString()).To(Equal(`"{"80/tcp":[{"HostIp":"","HostPort":"8084"}]}"`)) + 
Expect(inspect.OutputToString()).To(Equal(`"{"80/tcp":[{"HostIp":"0.0.0.0","HostPort":"8084"}]}"`)) }) It("Verify container inspect has default network", func() { @@ -573,13 +583,15 @@ var _ = Describe("Podman inspect", func() { Expect(session).Should(ExitCleanly()) Expect(session.OutputToString()).To(BeEmpty()) + commandNotFound := "OCI runtime attempted to invoke a command that was not found" session = podmanTest.Podman([]string{"start", cid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, commandNotFound)) + session = podmanTest.Podman([]string{"container", "inspect", cid, "-f", "'{{ .State.Error }}"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).ToNot(BeEmpty()) + Expect(session.OutputToString()).To(ContainSubstring(commandNotFound)) }) }) diff --git a/test/e2e/kill_test.go b/test/e2e/kill_test.go index 56f13117da..981cf8d760 100644 --- a/test/e2e/kill_test.go +++ b/test/e2e/kill_test.go @@ -1,10 +1,11 @@ package integration import ( + "path/filepath" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman kill", func() { @@ -12,7 +13,7 @@ var _ = Describe("Podman kill", func() { It("podman kill bogus container", func() { session := podmanTest.Podman([]string{"kill", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman container kill a running container by id", func() { @@ -85,7 +86,7 @@ var _ = Describe("Podman kill", func() { result := podmanTest.Podman([]string{"kill", "-s", "foobar", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "invalid signal: foobar")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) }) @@ -126,15 +127,14 @@ var _ = Describe("Podman kill", func() { }) It("podman kill --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "cid" + cidFile := filepath.Join(tempdir, "cid") - session := podmanTest.Podman([]string{"run", "-dt", "--cidfile", tmpFile, ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--cidfile", cidFile, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid := session.OutputToStringArray()[0] - kill := podmanTest.Podman([]string{"kill", "--cidfile", tmpFile}) + kill := podmanTest.Podman([]string{"kill", "--cidfile", cidFile}) kill.WaitWithDefaultTimeout() Expect(kill).Should(ExitCleanly()) @@ -144,23 +144,20 @@ var _ = Describe("Podman kill", func() { }) It("podman kill multiple --cidfile", func() { - tmpDir1 := GinkgoT().TempDir() - tmpFile1 := tmpDir1 + "cid" - - tmpDir2 := GinkgoT().TempDir() - tmpFile2 := tmpDir2 + "cid" + cidFile1 := filepath.Join(tempdir, "cid1") + cidFile2 := filepath.Join(tempdir, "cid2") - session := podmanTest.Podman([]string{"run", "-dt", "--cidfile", tmpFile1, ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--cidfile", cidFile1, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid1 := session.OutputToStringArray()[0] - session2 := podmanTest.Podman([]string{"run", "-dt", "--cidfile", tmpFile2, ALPINE, "top"}) + session2 := podmanTest.Podman([]string{"run", "-dt", "--cidfile", cidFile2, ALPINE, "top"}) 
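A detail worth noting in the kill and generate-systemd hunks above: the old code built temporary file paths by string concatenation (GinkgoT().TempDir() + "cid"), which silently drops the path separator, so the file lands next to the temporary directory rather than inside it. That is presumably why these tests switch to filepath.Join(tempdir, "cid"). A minimal standalone demonstration of the difference:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "podman-e2e-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Concatenation drops the separator: the result is a sibling path,
	// e.g. /tmp/podman-e2e-123cid, not a file inside the directory.
	fmt.Println(dir + "cid")
	// filepath.Join inserts the separator: e.g. /tmp/podman-e2e-123/cid.
	fmt.Println(filepath.Join(dir, "cid"))
}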
session2.WaitWithDefaultTimeout() Expect(session2).Should(ExitCleanly()) cid2 := session2.OutputToStringArray()[0] - kill := podmanTest.Podman([]string{"kill", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + kill := podmanTest.Podman([]string{"kill", "--cidfile", cidFile1, "--cidfile", cidFile2}) kill.WaitWithDefaultTimeout() Expect(kill).Should(ExitCleanly()) diff --git a/test/e2e/load_test.go b/test/e2e/load_test.go index f0f701ed74..e4e190c30f 100644 --- a/test/e2e/load_test.go +++ b/test/e2e/load_test.go @@ -86,11 +86,7 @@ var _ = Describe("Podman load", func() { result := podmanTest.Podman([]string{"load", "-q", "--signature-policy", "/etc/containers/policy.json", "-i", outfile}) result.WaitWithDefaultTimeout() if IsRemote() { - Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring("unknown flag")) - result = podmanTest.Podman([]string{"load", "-i", outfile}) - result.WaitWithDefaultTimeout() - Expect(result).Should(ExitCleanly()) + Expect(result).To(ExitWithError(125, "unknown flag: --signature-policy")) } else { Expect(result).Should(ExitCleanly()) } @@ -138,16 +134,13 @@ var _ = Describe("Podman load", func() { result := podmanTest.Podman([]string{"load", "-i", podmanTest.TempDir}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - - errMsg := fmt.Sprintf("remote client supports archives only but %q is a directory", podmanTest.TempDir) - Expect(result.ErrorToString()).To(ContainSubstring(errMsg)) + Expect(result).Should(ExitWithError(125, fmt.Sprintf("remote client supports archives only but %q is a directory", podmanTest.TempDir))) }) It("podman load bogus file", func() { save := podmanTest.Podman([]string{"load", "-i", "foobar.tar"}) save.WaitWithDefaultTimeout() - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, "faccessat foobar.tar: no such file or directory")) }) It("podman load multiple tags", func() { diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go index a6ca4b69ae..f860f94436 100644 --- a/test/e2e/login_logout_test.go +++ b/test/e2e/login_logout_test.go @@ -122,8 +122,7 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"push", "-q", ALPINE, testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring(": authentication required")) + Expect(session).To(ExitWithError(125, ": authentication required")) }) It("podman login and logout without registry parameter", func() { @@ -167,8 +166,7 @@ var _ = Describe("Podman login and logout", func() { // push should fail with nonexistent authfile session = podmanTest.Podman([]string{"push", "-q", "--authfile", "/tmp/nonexistent", ALPINE, testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory")) + Expect(session).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) session = podmanTest.Podman([]string{"push", "-q", "--authfile", authFile, ALPINE, testImg}) session.WaitWithDefaultTimeout() @@ -181,8 +179,7 @@ var _ = Describe("Podman login and logout", func() { // logout should fail with nonexistent authfile session = podmanTest.Podman([]string{"logout", "--authfile", "/tmp/nonexistent", server}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - 
Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory")) + Expect(session).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) session = podmanTest.Podman([]string{"logout", "--authfile", authFile, server}) session.WaitWithDefaultTimeout() @@ -205,8 +202,7 @@ var _ = Describe("Podman login and logout", func() { // logout should fail with nonexistent authfile session = podmanTest.Podman([]string{"logout", "--compat-auth-file", "/tmp/nonexistent", server}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory")) + Expect(session).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) // inconsistent command line flags are rejected // Pre-create the files to make sure we are not hitting the “file not found” path @@ -219,13 +215,11 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"login", "--username", "podmantest", "--password", "test", "--authfile", authFile, "--compat-auth-file", compatAuthFile, server}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal("Error: options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")) + Expect(session).To(ExitWithError(125, "options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")) session = podmanTest.Podman([]string{"logout", "--authfile", authFile, "--compat-auth-file", compatAuthFile, server}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal("Error: options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")) + Expect(session).To(ExitWithError(125, "options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")) }) It("podman manifest with --authfile", func() { @@ -244,8 +238,7 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"manifest", "push", "-q", testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring(": authentication required")) + Expect(session).To(ExitWithError(125, ": authentication required")) session = podmanTest.Podman([]string{"manifest", "push", "-q", "--authfile", authFile, testImg}) session.WaitWithDefaultTimeout() @@ -258,8 +251,7 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"manifest", "inspect", testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring(": authentication required")) + Expect(session).To(ExitWithError(125, ": authentication required")) session = podmanTest.Podman([]string{"manifest", "inspect", "--authfile", authFile, testImg}) session.WaitWithDefaultTimeout() @@ -336,8 +328,7 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"push", "-q", ALPINE, "localhost:9001/test-alpine"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - 
Expect(session.ErrorToString()).To(ContainSubstring("/test-alpine: authentication required")) + Expect(session).To(ExitWithError(125, "/test-alpine: authentication required")) session = podmanTest.Podman([]string{"login", "--username", "podmantest", "--password", "test", "localhost:9001"}) session.WaitWithDefaultTimeout() @@ -357,8 +348,7 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"push", "-q", ALPINE, testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("/test-alpine: authentication required")) + Expect(session).To(ExitWithError(125, "/test-alpine: authentication required")) session = podmanTest.Podman([]string{"push", "-q", ALPINE, "localhost:9001/test-alpine"}) session.WaitWithDefaultTimeout() @@ -374,13 +364,11 @@ var _ = Describe("Podman login and logout", func() { session = podmanTest.Podman([]string{"push", "-q", ALPINE, testImg}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("/test-alpine: authentication required")) + Expect(session).To(ExitWithError(125, "/test-alpine: authentication required")) session = podmanTest.Podman([]string{"push", "-q", ALPINE, "localhost:9001/test-alpine"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("/test-alpine: authentication required")) + Expect(session).To(ExitWithError(125, "/test-alpine: authentication required")) }) It("podman login and logout with repository", func() { @@ -533,8 +521,7 @@ var _ = Describe("Podman login and logout", func() { ALPINE, server + "/podmantest/test-image", }) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("/test-image: authentication required")) + Expect(session).To(ExitWithError(125, "/test-image: authentication required")) session = podmanTest.Podman([]string{ "push", "-q", @@ -581,7 +568,6 @@ var _ = Describe("Podman login and logout", func() { server + "/podmantest/test-alpine", }) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("/test-alpine: authentication required")) + Expect(session).To(ExitWithError(125, "/test-alpine: authentication required")) }) }) diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go index acfb950997..16392fbbd9 100644 --- a/test/e2e/logs_test.go +++ b/test/e2e/logs_test.go @@ -29,8 +29,7 @@ var _ = Describe("Podman logs", func() { It("podman logs on not existent container", func() { results := podmanTest.Podman([]string{"logs", "notexist"}) results.WaitWithDefaultTimeout() - Expect(results).To(Exit(125)) - Expect(results.ErrorToString()).To(Equal(`Error: no container with name or ID "notexist" found: no such container`)) + Expect(results).To(ExitWithError(125, `no container with name or ID "notexist" found: no such container`)) }) for _, log := range []string{"k8s-file", "journald", "json-file"} { @@ -270,7 +269,11 @@ var _ = Describe("Podman logs", func() { results := podmanTest.Podman([]string{"logs", "-l", "foobar"}) results.WaitWithDefaultTimeout() - Expect(results).To(ExitWithError()) + if IsRemote() { + Expect(results).To(ExitWithError(125, "unknown shorthand flag: 'l' in -l")) + } else { + Expect(results).To(ExitWithError(125, "--latest and containers cannot be used together")) + } }) It("two containers showing short container IDs: 
"+log, func() { @@ -326,8 +329,7 @@ var _ = Describe("Podman logs", func() { if log == "journald" && !isEventBackendJournald(podmanTest) { // --follow + journald log-driver is only supported with journald events-backend(PR #10431) - Expect(results).To(Exit(125)) - Expect(results.ErrorToString()).To(ContainSubstring("using --follow with the journald --log-driver but without the journald --events-backend")) + Expect(results).To(ExitWithError(125, "using --follow with the journald --log-driver but without the journald --events-backend")) return } @@ -366,8 +368,7 @@ var _ = Describe("Podman logs", func() { results.WaitWithDefaultTimeout() if log == "journald" && !isEventBackendJournald(podmanTest) { // --follow + journald log-driver is only supported with journald events-backend(PR #10431) - Expect(results).To(Exit(125)) - Expect(results.ErrorToString()).To(ContainSubstring("using --follow with the journald --log-driver but without the journald --events-backend")) + Expect(results).To(ExitWithError(125, "using --follow with the journald --log-driver but without the journald --events-backend")) return } Expect(results).To(ExitCleanly()) @@ -581,8 +582,7 @@ var _ = Describe("Podman logs", func() { logs := podmanTest.Podman([]string{"logs", "-f", ctrName}) logs.WaitWithDefaultTimeout() - Expect(logs).To(Exit(125)) - Expect(logs.ErrorToString()).To(ContainSubstring("this container is using the 'none' log driver, cannot read logs: this container is not logging output")) + Expect(logs).To(ExitWithError(125, "this container is using the 'none' log driver, cannot read logs: this container is not logging output")) }) It("podman logs with non ASCII log tag fails without correct LANG", func() { @@ -595,14 +595,10 @@ var _ = Describe("Podman logs", func() { defer cleanup() logc := podmanTest.Podman([]string{"run", "--log-driver", "journald", "--log-opt", "tag=äöüß", ALPINE, "echo", "podman"}) logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(126)) - // FIXME-2023-09-26: conmon <2.1.8 logs to stdout; clean this up once >=2.1.8 is universal - errmsg := logc.ErrorToString() + logc.OutputToString() + Expect(logc).To(ExitWithError(126, "conmon failed: exit status 1")) if !IsRemote() { - // Error is only seen on local client - Expect(errmsg).To(ContainSubstring("conmon: option parsing failed: Invalid byte sequence in conversion input")) + Expect(logc.ErrorToString()).To(ContainSubstring("conmon: option parsing failed: Invalid byte sequence in conversion input")) } - Expect(errmsg).To(ContainSubstring("conmon failed: exit status 1")) }) It("podman logs with non ASCII log tag succeeds with proper env", func() { diff --git a/test/e2e/manifest_test.go b/test/e2e/manifest_test.go index 297c32662c..dba357c18b 100644 --- a/test/e2e/manifest_test.go +++ b/test/e2e/manifest_test.go @@ -2,6 +2,7 @@ package integration import ( "encoding/json" + "fmt" "os" "path/filepath" "strings" @@ -16,6 +17,35 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) +// validateManifestHasAllArchs checks that the specified manifest has all +// the archs in `imageList` +func validateManifestHasAllArchs(path string) error { + data, err := os.ReadFile(path) + if err != nil { + return err + } + var result struct { + Manifests []struct { + Platform struct { + Architecture string + } + } + } + if err := json.Unmarshal(data, &result); err != nil { + return err + } + archs := map[string]bool{ + "amd64": false, + "arm64": false, + "ppc64le": false, + "s390x": false, + } + for _, m := range result.Manifests { + 
archs[m.Platform.Architecture] = true + } + return nil +} + // Internal function to verify instance compression func verifyInstanceCompression(descriptor []imgspecv1.Descriptor, compression string, arch string) bool { for _, instance := range descriptor { @@ -54,13 +84,12 @@ var _ = Describe("Podman manifest", func() { session = podmanTest.Podman([]string{"manifest", "create", "foo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `image name "localhost/foo:latest" is already associated with image `)) session = podmanTest.Podman([]string{"manifest", "push", "--all", "foo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - // Push should actually fail since its not valid registry - Expect(session.ErrorToString()).To(ContainSubstring("requested access to the resource is denied")) + // Push should actually fail since it's not valid registry + Expect(session).To(ExitWithError(125, "requested access to the resource is denied")) Expect(session.OutputToString()).To(Not(ContainSubstring("accepts 2 arg(s), received 1"))) session = podmanTest.Podman([]string{"manifest", "create", amend, "foo"}) @@ -169,9 +198,9 @@ var _ = Describe("Podman manifest", func() { err := podmanTest.RestoreArtifact(REGISTRY_IMAGE) Expect(err).ToNot(HaveOccurred()) } - lock := GetPortLock("5000") + lock := GetPortLock("5007") defer lock.Unlock() - session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) + session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5007:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -197,19 +226,18 @@ var _ = Describe("Podman manifest", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - push := podmanTest.Podman([]string{"manifest", "push", "--all", "--add-compression", "zstd", "--tls-verify=false", "--remove-signatures", "foobar", "localhost:5000/list"}) + push := podmanTest.Podman([]string{"manifest", "push", "--all", "--compression-format", "gzip", "--add-compression", "zstd", "--tls-verify=false", "--remove-signatures", "foobar", "localhost:5007/list"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) output := push.ErrorToString() // 4 images must be pushed two for gzip and two for zstd Expect(output).To(ContainSubstring("Copying 4 images generated from 2 images in list")) - session = podmanTest.Podman([]string{"run", "--rm", "--net", "host", "quay.io/skopeo/stable", "inspect", "--tls-verify=false", "--raw", "docker://localhost:5000/list:latest"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(0)) + skopeo := SystemExec("skopeo", []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5007/list:latest"}) + Expect(skopeo).Should(ExitCleanly()) + inspectData := []byte(skopeo.OutputToString()) var index imgspecv1.Index - inspectData := []byte(session.OutputToString()) - err := json.Unmarshal(inspectData, &index) + err = json.Unmarshal(inspectData, &index) Expect(err).ToNot(HaveOccurred()) Expect(verifyInstanceCompression(index.Manifests, "zstd", "amd64")).Should(BeTrue()) @@ -218,17 +246,16 @@ var _ = Describe("Podman manifest", func() { Expect(verifyInstanceCompression(index.Manifests, "gzip", "amd64")).Should(BeTrue()) // Note: Pushing again with --force-compression should produce the correct response 
the since blobs will be correctly force-pushed again. - push = podmanTest.Podman([]string{"manifest", "push", "--all", "--add-compression", "zstd", "--tls-verify=false", "--compression-format", "gzip", "--force-compression", "--remove-signatures", "foobar", "localhost:5000/list"}) + push = podmanTest.Podman([]string{"manifest", "push", "--all", "--add-compression", "zstd", "--tls-verify=false", "--compression-format", "gzip", "--force-compression", "--remove-signatures", "foobar", "localhost:5007/list"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) output = push.ErrorToString() // 4 images must be pushed two for gzip and two for zstd Expect(output).To(ContainSubstring("Copying 4 images generated from 2 images in list")) - session = podmanTest.Podman([]string{"run", "--rm", "--net", "host", "quay.io/skopeo/stable", "inspect", "--tls-verify=false", "--raw", "docker://localhost:5000/list:latest"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - inspectData = []byte(session.OutputToString()) + skopeo = SystemExec("skopeo", []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5007/list:latest"}) + Expect(skopeo).Should(ExitCleanly()) + inspectData = []byte(skopeo.OutputToString()) err = json.Unmarshal(inspectData, &index) Expect(err).ToNot(HaveOccurred()) @@ -244,17 +271,16 @@ add_compression = ["zstd"]`), 0o644) Expect(err).ToNot(HaveOccurred()) os.Setenv("CONTAINERS_CONF", confFile) - push = podmanTest.Podman([]string{"manifest", "push", "--all", "--tls-verify=false", "--compression-format", "gzip", "--force-compression", "--remove-signatures", "foobar", "localhost:5000/list"}) + push = podmanTest.Podman([]string{"manifest", "push", "--all", "--tls-verify=false", "--compression-format", "gzip", "--force-compression", "--remove-signatures", "foobar", "localhost:5007/list"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) output = push.ErrorToString() // 4 images must be pushed two for gzip and two for zstd Expect(output).To(ContainSubstring("Copying 4 images generated from 2 images in list")) - session = podmanTest.Podman([]string{"run", "--rm", "--net", "host", "quay.io/skopeo/stable", "inspect", "--tls-verify=false", "--raw", "docker://localhost:5000/list:latest"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - inspectData = []byte(session.OutputToString()) + skopeo = SystemExec("skopeo", []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5007/list:latest"}) + Expect(skopeo).Should(ExitCleanly()) + inspectData = []byte(skopeo.OutputToString()) err = json.Unmarshal(inspectData, &index) Expect(err).ToNot(HaveOccurred()) @@ -265,17 +291,16 @@ add_compression = ["zstd"]`), 0o644) // Note: Pushing again with --force-compression=false should produce in-correct/wrong result since blobs are already present in registry so they will be reused // ignoring our compression priority ( this is expected behaviour of c/image and --force-compression is introduced to mitigate this behaviour ). 
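One gap in the helper introduced earlier in this manifest_test.go diff: once manifest.json parses, validateManifestHasAllArchs fills in the archs map and then returns nil without ever consulting it, so it can never fail even when an architecture is missing. Assuming the intent matches the function name (fail unless amd64, arm64, ppc64le and s390x are all present), a completed version might look like the sketch below; it needs encoding/json, fmt and os.

// validateManifestHasAllArchs checks that the image index at path contains an
// instance for every expected architecture.
func validateManifestHasAllArchs(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var result struct {
		Manifests []struct {
			Platform struct{ Architecture string }
		}
	}
	if err := json.Unmarshal(data, &result); err != nil {
		return err
	}
	archs := map[string]bool{"amd64": false, "arm64": false, "ppc64le": false, "s390x": false}
	for _, m := range result.Manifests {
		archs[m.Platform.Architecture] = true
	}
	// This final check is what the version in the patch appears to omit.
	for arch, found := range archs {
		if !found {
			return fmt.Errorf("could not find an instance for arch %q in %s", arch, path)
		}
	}
	return nil
}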
- push = podmanTest.Podman([]string{"manifest", "push", "--all", "--add-compression", "zstd", "--force-compression=false", "--tls-verify=false", "--remove-signatures", "foobar", "localhost:5000/list"}) + push = podmanTest.Podman([]string{"manifest", "push", "--all", "--add-compression", "zstd", "--force-compression=false", "--tls-verify=false", "--remove-signatures", "foobar", "localhost:5007/list"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) output = push.ErrorToString() // 4 images must be pushed two for gzip and two for zstd Expect(output).To(ContainSubstring("Copying 4 images generated from 2 images in list")) - session = podmanTest.Podman([]string{"run", "--rm", "--net", "host", "quay.io/skopeo/stable", "inspect", "--tls-verify=false", "--raw", "docker://localhost:5000/list:latest"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - inspectData = []byte(session.OutputToString()) + skopeo = SystemExec("skopeo", []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5007/list:latest"}) + Expect(skopeo).Should(ExitCleanly()) + inspectData = []byte(skopeo.OutputToString()) err = json.Unmarshal(inspectData, &index) Expect(err).ToNot(HaveOccurred()) @@ -312,8 +337,8 @@ add_compression = ["zstd"]`), 0o644) Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"manifest", "add", "--annotation", "hoge", "foo", imageList}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("no value given for annotation")) + Expect(session).Should(ExitWithError(125, "no value given for annotation")) + session = podmanTest.Podman([]string{"manifest", "add", "--annotation", "hoge=fuga", "--annotation", "key=val,withcomma", "foo", imageList}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -392,9 +417,18 @@ add_compression = ["zstd"]`), 0o644) session = podmanTest.Podman([]string{"manifest", "add", "foo", imageList}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"manifest", "remove", "foo", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}) + bogusID := "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + session = podmanTest.Podman([]string{"manifest", "remove", "foo", bogusID}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + + // FIXME-someday: figure out why message differs in podman-remote + expectMessage := "removing from manifest list foo: " + if IsRemote() { + expectMessage += "removing from manifest foo" + } else { + expectMessage += fmt.Sprintf(`no instance matching digest %q found in manifest list: file does not exist`, bogusID) + } + Expect(session).To(ExitWithError(125, expectMessage)) session = podmanTest.Podman([]string{"manifest", "rm", "foo"}) session.WaitWithDefaultTimeout() @@ -419,19 +453,8 @@ add_compression = ["zstd"]`), 0o644) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - files, err := filepath.Glob(dest + string(os.PathSeparator) + "*") - Expect(err).ShouldNot(HaveOccurred()) - check := SystemExec("sha256sum", files) - check.WaitWithDefaultTimeout() - Expect(check).Should(ExitCleanly()) - prefix := "sha256:" - Expect(check.OutputToString()).To( - And( - ContainSubstring(strings.TrimPrefix(imageListAMD64InstanceDigest, prefix)), - ContainSubstring(strings.TrimPrefix(imageListPPC64LEInstanceDigest, prefix)), - 
ContainSubstring(strings.TrimPrefix(imageListS390XInstanceDigest, prefix)), - ContainSubstring(strings.TrimPrefix(imageListARM64InstanceDigest, prefix)), - )) + err = validateManifestHasAllArchs(filepath.Join(dest, "manifest.json")) + Expect(err).ToNot(HaveOccurred()) }) It("push", func() { @@ -451,20 +474,9 @@ add_compression = ["zstd"]`), 0o644) session = podmanTest.Podman([]string{"push", "-q", "foo", "dir:" + dest}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - files, err := filepath.Glob(dest + string(os.PathSeparator) + "*") - Expect(err).ToNot(HaveOccurred()) - check := SystemExec("sha256sum", files) - check.WaitWithDefaultTimeout() - Expect(check).Should(ExitCleanly()) - prefix := "sha256:" - Expect(check.OutputToString()).To( - And( - ContainSubstring(strings.TrimPrefix(imageListAMD64InstanceDigest, prefix)), - ContainSubstring(strings.TrimPrefix(imageListPPC64LEInstanceDigest, prefix)), - ContainSubstring(strings.TrimPrefix(imageListS390XInstanceDigest, prefix)), - ContainSubstring(strings.TrimPrefix(imageListARM64InstanceDigest, prefix)), - )) + err = validateManifestHasAllArchs(filepath.Join(dest, "manifest.json")) + Expect(err).ToNot(HaveOccurred()) }) It("push with compression-format and compression-level", func() { @@ -486,9 +498,7 @@ RUN touch /file tmpDir := filepath.Join(podmanTest.TempDir, "wrong-compression") session = podmanTest.Podman([]string{"manifest", "push", "--compression-format", "gzip", "--compression-level", "50", "foo", "oci:" + tmpDir}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - output := session.ErrorToString() - Expect(output).To(ContainSubstring("invalid compression level")) + Expect(session).Should(ExitWithError(125, "invalid compression level")) dest := filepath.Join(podmanTest.TempDir, "pushed") err := os.MkdirAll(dest, os.ModePerm) @@ -557,7 +567,7 @@ RUN touch /file push := podmanTest.Podman([]string{"manifest", "push", "--all", "--tls-verify=false", "--remove-signatures", "foo", "localhost:7000/bogus"}) push.WaitWithDefaultTimeout() - Expect(push).Should(Exit(125)) + Expect(push).Should(ExitWithError(125, "Failed, retrying in 1s ... (1/3)")) Expect(push.ErrorToString()).To(MatchRegexp("Copying blob.*Failed, retrying in 1s \\.\\.\\. 
\\(1/3\\).*Copying blob.*Failed, retrying in 2s")) }) @@ -590,7 +600,8 @@ RUN touch /file push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--creds=" + registry.User + ":" + registry.Password, "--format=v2s2", "localhost:" + registry.Port + "/citest:latest"}) push.WaitWithDefaultTimeout() - Expect(push).Should(ExitCleanly()) + // Cannot ExitCleanly() because this sometimes warns "Failed, retrying in 1s" + Expect(push).Should(Exit(0)) session = podmanTest.Podman([]string{"manifest", "add", "--tls-verify=false", "--creds=" + registry.User + ":" + registry.Password, "foo", "localhost:" + registry.Port + "/citest:latest"}) session.WaitWithDefaultTimeout() @@ -606,8 +617,7 @@ RUN touch /file push = podmanTest.Podman([]string{"manifest", "push", "--compression-format=gzip", "--compression-level=2", "--tls-verify=false", "--creds=podmantest:wrongpasswd", "foo", "localhost:" + registry.Port + "/credstest"}) push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) - Expect(push.ErrorToString()).To(ContainSubstring(": authentication required")) + Expect(push).To(ExitWithError(125, ": authentication required")) // push --rm after pull image (#15033) push = podmanTest.Podman([]string{"manifest", "push", "-q", "--rm", "--tls-verify=false", "--creds=" + registry.User + ":" + registry.Password, "foo", "localhost:" + registry.Port + "/rmtest"}) @@ -623,8 +633,7 @@ RUN touch /file It("push with error", func() { session := podmanTest.Podman([]string{"manifest", "push", "badsrcvalue", "baddestvalue"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("retrieving local image from image name badsrcvalue: badsrcvalue: image not known")) + Expect(session).Should(ExitWithError(125, "retrieving local image from image name badsrcvalue: badsrcvalue: image not known")) }) It("push --rm to local directory", func() { @@ -646,8 +655,8 @@ RUN touch /file Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"manifest", "push", "-p", "foo", "dir:" + dest}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("retrieving local image from image name foo: foo: image not known")) + Expect(session).Should(ExitWithError(125, "retrieving local image from image name foo: foo: image not known")) + session = podmanTest.Podman([]string{"images", "-q", "foo"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -674,9 +683,9 @@ RUN touch /file session = podmanTest.Podman([]string{"manifest", "rm", "foo", "bar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("foo: image not known")) - Expect(session.ErrorToString()).To(ContainSubstring("bar: image not known")) + Expect(session).Should(ExitWithError(1, " 2 errors occurred:")) + Expect(session.ErrorToString()).To(ContainSubstring("* foo: image not known")) + Expect(session.ErrorToString()).To(ContainSubstring("* bar: image not known")) }) It("exists", func() { @@ -691,7 +700,7 @@ RUN touch /file session = podmanTest.Podman([]string{"manifest", "exists", "no-manifest"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("rm should not remove referenced images", func() { @@ -724,8 +733,7 @@ RUN touch /file // manifest rm should fail with `image is not a manifest list` session := podmanTest.Podman([]string{"manifest", 
"rm", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("image is not a manifest list")) + Expect(session).Should(ExitWithError(125, "image is not a manifest list")) manifestName := "testmanifest:sometag" session = podmanTest.Podman([]string{"manifest", "create", manifestName}) @@ -745,6 +753,6 @@ RUN touch /file // verify that manifest should not exist session = podmanTest.Podman([]string{"manifest", "exists", manifestName}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) }) diff --git a/test/e2e/mount_rootless_test.go b/test/e2e/mount_rootless_test.go index b8360dfa9c..7e4af2eb68 100644 --- a/test/e2e/mount_rootless_test.go +++ b/test/e2e/mount_rootless_test.go @@ -1,6 +1,8 @@ package integration import ( + "slices" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -21,8 +23,7 @@ var _ = Describe("Podman mount", func() { mount := podmanTest.Podman([]string{"mount", cid}) mount.WaitWithDefaultTimeout() - Expect(mount).To(ExitWithError()) - Expect(mount.ErrorToString()).To(ContainSubstring("podman unshare")) + Expect(mount).To(ExitWithError(125, "must execute `podman unshare` first")) }) It("podman unshare podman mount", func() { @@ -48,8 +49,7 @@ var _ = Describe("Podman mount", func() { podmanTest.AddImageToRWStore(ALPINE) mount := podmanTest.Podman([]string{"image", "mount", ALPINE}) mount.WaitWithDefaultTimeout() - Expect(mount).To(ExitWithError()) - Expect(mount.ErrorToString()).To(ContainSubstring("podman unshare")) + Expect(mount).To(ExitWithError(125, "must execute `podman unshare` first")) }) It("podman unshare image podman mount", func() { @@ -65,5 +65,14 @@ var _ = Describe("Podman mount", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(session.OutputToString()).To(ContainSubstring(podmanTest.TempDir)) + + // We have to unmount the image again otherwise we leak the tmpdir + // as active mount points cannot be removed. + index := slices.Index(args, "mount") + Expect(index).To(BeNumerically(">", 0), "index should be found") + args[index] = "unmount" + session = podmanTest.Podman(args) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) }) }) diff --git a/test/e2e/mount_test.go b/test/e2e/mount_test.go index 4fccde591d..aba488528f 100644 --- a/test/e2e/mount_test.go +++ b/test/e2e/mount_test.go @@ -59,10 +59,9 @@ var _ = Describe("Podman mount", func() { Expect(j).Should(ExitCleanly()) Expect(j.OutputToString()).To(BeValidJSON()) - j = podmanTest.Podman([]string{"mount", "--format='{{.foobar}}'"}) + j = podmanTest.Podman([]string{"mount", "--format={{.foobar}}"}) j.WaitWithDefaultTimeout() - Expect(j).To(ExitWithError()) - Expect(j.ErrorToString()).To(ContainSubstring("unknown --format")) + Expect(j).To(ExitWithError(125, `unknown --format argument: "{{.foobar}}"`)) umount := podmanTest.Podman([]string{"umount", cid}) umount.WaitWithDefaultTimeout() diff --git a/test/e2e/negative_test.go b/test/e2e/negative_test.go index 7e6bfca752..f6cae5352a 100644 --- a/test/e2e/negative_test.go +++ b/test/e2e/negative_test.go @@ -1,6 +1,8 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -11,6 +13,10 @@ var _ = Describe("Podman negative command-line", func() { It("podman snuffleupagus exits non-zero", func() { session := podmanTest.Podman([]string{"snuffleupagus"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + cmdName := "podman" + if IsRemote() { + cmdName += "-remote" + } + Expect(session).To(ExitWithError(125, fmt.Sprintf("unrecognized command `%s snuffleupagus`", cmdName))) }) }) diff --git a/test/e2e/network_connect_disconnect_test.go b/test/e2e/network_connect_disconnect_test.go index cb3a1aae1d..3764b44bc7 100644 --- a/test/e2e/network_connect_disconnect_test.go +++ b/test/e2e/network_connect_disconnect_test.go @@ -1,11 +1,12 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" "github.com/onsi/gomega/types" ) @@ -14,7 +15,7 @@ var _ = Describe("Podman network connect and disconnect", func() { It("bad network name in disconnect should result in error", func() { dis := podmanTest.Podman([]string{"network", "disconnect", "foobar", "test"}) dis.WaitWithDefaultTimeout() - Expect(dis).Should(ExitWithError()) + Expect(dis).Should(ExitWithError(125, `no container with name or ID "test" found: no such container`)) }) It("bad container name in network disconnect should result in error", func() { @@ -26,7 +27,7 @@ var _ = Describe("Podman network connect and disconnect", func() { dis := podmanTest.Podman([]string{"network", "disconnect", netName, "foobar"}) dis.WaitWithDefaultTimeout() - Expect(dis).Should(ExitWithError()) + Expect(dis).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("network disconnect with net mode slirp4netns should result in error", func() { @@ -43,8 +44,7 @@ var _ = Describe("Podman network connect and disconnect", func() { con := podmanTest.Podman([]string{"network", "disconnect", netName, "test"}) con.WaitWithDefaultTimeout() - Expect(con).Should(ExitWithError()) - Expect(con.ErrorToString()).To(ContainSubstring(`"slirp4netns" is not supported: invalid network mode`)) + Expect(con).Should(ExitWithError(125, `"slirp4netns" is not supported: invalid network mode`)) }) It("podman network disconnect", func() { @@ -85,7 +85,7 @@ var _ = Describe("Podman network connect and disconnect", func() { exec = podmanTest.Podman([]string{"exec", "test", "ip", "addr", "show", "eth0"}) exec.WaitWithDefaultTimeout() - Expect(exec).Should(ExitWithError()) + Expect(exec).Should(ExitWithError(1, "ip: can't find device 'eth0'")) exec3 := podmanTest.Podman([]string{"exec", "test", "cat", "/etc/resolv.conf"}) exec3.WaitWithDefaultTimeout() @@ -99,9 +99,13 @@ var _ = Describe("Podman network connect and disconnect", func() { }) It("bad network name in connect should result in error", func() { - dis := podmanTest.Podman([]string{"network", "connect", "foobar", "test"}) + session := podmanTest.Podman([]string{"create", "--name", "testContainer", "--network", "bridge", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + + dis := podmanTest.Podman([]string{"network", "connect", "nonexistent-network", "testContainer"}) dis.WaitWithDefaultTimeout() - Expect(dis).Should(ExitWithError()) + Expect(dis).Should(ExitWithError(125, "unable to find network with name or ID nonexistent-network: network not found")) }) It("bad container name in network connect should result in error", 
func() { @@ -113,7 +117,7 @@ var _ = Describe("Podman network connect and disconnect", func() { dis := podmanTest.Podman([]string{"network", "connect", netName, "foobar"}) dis.WaitWithDefaultTimeout() - Expect(dis).Should(ExitWithError()) + Expect(dis).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("network connect with net mode slirp4netns should result in error", func() { @@ -130,8 +134,7 @@ var _ = Describe("Podman network connect and disconnect", func() { con := podmanTest.Podman([]string{"network", "connect", netName, "test"}) con.WaitWithDefaultTimeout() - Expect(con).Should(ExitWithError()) - Expect(con.ErrorToString()).To(ContainSubstring(`"slirp4netns" is not supported: invalid network mode`)) + Expect(con).Should(ExitWithError(125, `"slirp4netns" is not supported: invalid network mode`)) }) It("podman connect on a container that already is connected to the network should error after init", func() { @@ -162,7 +165,11 @@ var _ = Describe("Podman network connect and disconnect", func() { con2 := podmanTest.Podman([]string{"network", "connect", netName, "test"}) con2.WaitWithDefaultTimeout() - Expect(con2).Should(ExitWithError()) + if podmanTest.DatabaseBackend == "boltdb" { + Expect(con2).Should(ExitWithError(125, fmt.Sprintf("container %s is already connected to network %q: network is already connected", cid, netName))) + } else { + Expect(con2).Should(ExitWithError(125, fmt.Sprintf("container %s is already connected to network %s: network is already connected", cid, netName))) + } }) It("podman network connect", func() { @@ -359,11 +366,12 @@ var _ = Describe("Podman network connect and disconnect", func() { exec := podmanTest.Podman([]string{"exec", "test", "ip", "addr", "show", "eth0"}) exec.WaitWithDefaultTimeout() - // because the network interface order is not guaranteed to be the same we have to check both eth0 and eth1 - // if eth0 did not exists eth1 has to exists - var exitMatcher types.GomegaMatcher = ExitWithError() + // because the network interface order is not guaranteed to be the same, we have to check both eth0 and eth1. + // if eth0 did not exist, eth1 has to exist. + var exitMatcher types.GomegaMatcher = ExitWithError(1, "ip: can't find device 'eth1'") if exec.ExitCode() > 0 { - exitMatcher = Exit(0) + Expect(exec).To(ExitWithError(1, "ip: can't find device 'eth0'")) + exitMatcher = ExitCleanly() } exec = podmanTest.Podman([]string{"exec", "test", "ip", "addr", "show", "eth1"}) @@ -402,6 +410,6 @@ var _ = Describe("Podman network connect and disconnect", func() { exec = podmanTest.Podman([]string{"exec", "test", "ip", "addr", "show", "eth0"}) exec.WaitWithDefaultTimeout() - Expect(exec).Should(ExitWithError()) + Expect(exec).Should(ExitWithError(1, "ip: can't find device 'eth0'")) }) }) diff --git a/test/e2e/network_create_test.go b/test/e2e/network_create_test.go index 764f97cdbb..0ce6be1a52 100644 --- a/test/e2e/network_create_test.go +++ b/test/e2e/network_create_test.go @@ -2,9 +2,11 @@ package integration import ( "encoding/json" + "fmt" "net" "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/containers/podman/v5/test/utils" "github.com/containers/storage/pkg/stringid" . 
"github.com/onsi/ginkgo/v2" @@ -32,7 +34,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -84,7 +86,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -125,7 +127,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -168,7 +170,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -213,7 +215,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -254,7 +256,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -284,7 +286,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -323,7 +325,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -394,7 +396,7 @@ var _ = Describe("Podman network create", func() { It("podman network create with invalid subnet", func() { nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/17000", stringid.GenerateRandomID()}) nc.WaitWithDefaultTimeout() - Expect(nc).To(ExitWithError()) + Expect(nc).To(ExitWithError(125, "invalid CIDR address: 10.11.12.0/17000")) }) It("podman network create with ipv4 subnet and ipv6 flag", func() { @@ -425,16 +427,25 @@ var _ = Describe("Podman network create", func() { 
Expect(nc.OutputToString()).To(ContainSubstring(`.0/24`)) }) - It("podman network create with invalid IP", func() { - nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.0/17000", stringid.GenerateRandomID()}) + It("podman network create with invalid IP arguments", func() { + nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/24", "--ip-range", "10.11.12.345-10.11.12.999"}) nc.WaitWithDefaultTimeout() - Expect(nc).To(ExitWithError()) + Expect(nc).To(ExitWithError(125, `range start ip "10.11.12.345" is not a ip address`)) + + nc = podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/24", "--ip-range", "10.11.12.3-10.11.12.999"}) + nc.WaitWithDefaultTimeout() + Expect(nc).To(ExitWithError(125, `range end ip "10.11.12.999" is not a ip address`)) + + nc = podmanTest.Podman([]string{"network", "create", "--gateway", "10.11.12.256"}) + nc.WaitWithDefaultTimeout() + Expect(nc).To(ExitWithError(125, `invalid argument "10.11.12.256" for "--gateway" flag: invalid string being converted to IP address: 10.11.12.256`)) + }) It("podman network create with invalid gateway for subnet", func() { nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/24", "--gateway", "192.168.1.1", stringid.GenerateRandomID()}) nc.WaitWithDefaultTimeout() - Expect(nc).To(ExitWithError()) + Expect(nc).To(ExitWithError(125, "gateway 192.168.1.1 not in subnet 10.11.12.0/24")) }) It("podman network create two networks with same name should fail", func() { @@ -446,7 +457,7 @@ var _ = Describe("Podman network create", func() { ncFail := podmanTest.Podman([]string{"network", "create", netName}) ncFail.WaitWithDefaultTimeout() - Expect(ncFail).To(ExitWithError()) + Expect(ncFail).To(ExitWithError(125, fmt.Sprintf("network name %s already used: network already exists", netName))) }) It("podman network create two networks with same subnet should fail", func() { @@ -460,7 +471,7 @@ var _ = Describe("Podman network create", func() { ncFail := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.13.0/24", netName2}) ncFail.WaitWithDefaultTimeout() defer podmanTest.removeNetwork(netName2) - Expect(ncFail).To(ExitWithError()) + Expect(ncFail).To(ExitWithError(125, "subnet 10.11.13.0/24 is already used on the host or by another config")) }) It("podman network create two IPv6 networks with same subnet should fail", func() { @@ -474,13 +485,13 @@ var _ = Describe("Podman network create", func() { ncFail := podmanTest.Podman([]string{"network", "create", "--subnet", "fd00:4:4:4:4::/64", "--ipv6", netName2}) ncFail.WaitWithDefaultTimeout() defer podmanTest.removeNetwork(netName2) - Expect(ncFail).To(ExitWithError()) + Expect(ncFail).To(ExitWithError(125, "subnet fd00:4:4:4::/64 is already used on the host or by another config")) }) It("podman network create with invalid network name", func() { - nc := podmanTest.Podman([]string{"network", "create", "foo "}) + nc := podmanTest.Podman([]string{"network", "create", "2bad!"}) nc.WaitWithDefaultTimeout() - Expect(nc).To(ExitWithError()) + Expect(nc).To(ExitWithError(125, "network name 2bad! 
invalid: names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: invalid argument")) }) It("podman network create with mtu option", func() { @@ -514,7 +525,7 @@ var _ = Describe("Podman network create", func() { nc := podmanTest.Podman([]string{"network", "create", "--opt", "foo=bar", net}) nc.WaitWithDefaultTimeout() defer podmanTest.removeNetwork(net) - Expect(nc).To(ExitWithError()) + Expect(nc).To(ExitWithError(125, "unsupported bridge network option foo")) }) It("podman CNI network create with internal should not have dnsname", func() { @@ -558,8 +569,7 @@ var _ = Describe("Podman network create", func() { for _, name := range []string{"none", "host", "bridge", "private", "slirp4netns", "pasta", "container", "ns", "default"} { nc := podmanTest.Podman([]string{"network", "create", name}) nc.WaitWithDefaultTimeout() - Expect(nc).To(Exit(125)) - Expect(nc.ErrorToString()).To(ContainSubstring("cannot create network with name %q because it conflicts with a valid network mode", name)) + Expect(nc).To(ExitWithError(125, fmt.Sprintf("cannot create network with name %q because it conflicts with a valid network mode", name))) } }) @@ -631,15 +641,13 @@ var _ = Describe("Podman network create", func() { gw2 := "fd52:2a5a:747e:3acf::10" nc := podmanTest.Podman([]string{"network", "create", "--subnet", subnet1, "--gateway", gw1, "--gateway", gw2, name}) nc.WaitWithDefaultTimeout() - Expect(nc).To(Exit(125)) - Expect(nc.ErrorToString()).To(Equal("Error: cannot set more gateways than subnets")) + Expect(nc).To(ExitWithError(125, "cannot set more gateways than subnets")) range1 := "10.10.3.0/26" range2 := "10.10.3.0/28" nc = podmanTest.Podman([]string{"network", "create", "--subnet", subnet1, "--ip-range", range1, "--ip-range", range2, name}) nc.WaitWithDefaultTimeout() - Expect(nc).To(Exit(125)) - Expect(nc.ErrorToString()).To(Equal("Error: cannot set more ranges than subnets")) + Expect(nc).To(ExitWithError(125, "cannot set more ranges than subnets")) }) It("podman network create same name - fail", func() { @@ -653,7 +661,7 @@ var _ = Describe("Podman network create", func() { nc = podmanTest.Podman(networkCreateCommand) nc.WaitWithDefaultTimeout() - Expect(nc).To(Exit(125)) + Expect(nc).To(ExitWithError(125, fmt.Sprintf("network name %s already used: network already exists", name))) }) It("podman network create same name - succeed with ignore", func() { @@ -711,7 +719,7 @@ var _ = Describe("Podman network create", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go index 1117eb7685..5072eeee0e 100644 --- a/test/e2e/network_test.go +++ b/test/e2e/network_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "time" - "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/containers/podman/v5/test/utils" "github.com/containers/storage/pkg/stringid" . 
"github.com/onsi/ginkgo/v2" @@ -118,8 +118,7 @@ var _ = Describe("Podman network", func() { session = podmanTest.Podman([]string{"network", "ls", "--filter", "namr=ab"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring(`invalid filter "namr"`)) + Expect(session).To(ExitWithError(125, `invalid filter "namr"`)) }) It("podman network list --filter failure", func() { @@ -148,8 +147,7 @@ var _ = Describe("Podman network", func() { session = podmanTest.Podman([]string{"network", "ls", "--filter", "dangling=foo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring(`invalid dangling filter value "foo"`)) + Expect(session).To(ExitWithError(125, `invalid dangling filter value "foo"`)) }) It("podman network ID test", func() { @@ -192,8 +190,13 @@ var _ = Describe("Podman network", func() { session = podmanTest.Podman([]string{"network", "inspect", netID[1:]}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("network not found")) + expectMessage := fmt.Sprintf("network %s: ", netID[1:]) + // FIXME-someday: figure out why this part does not show up in remote + if !IsRemote() { + expectMessage += fmt.Sprintf("unable to find network with name or ID %s: ", netID[1:]) + } + expectMessage += "network not found" + Expect(session).Should(ExitWithError(125, expectMessage)) session = podmanTest.Podman([]string{"network", "rm", netID}) session.WaitWithDefaultTimeout() @@ -204,7 +207,7 @@ var _ = Describe("Podman network", func() { It(fmt.Sprintf("podman network %s no args", rm), func() { session := podmanTest.Podman([]string{"network", rm}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "requires at least 1 arg(s), only received 0")) }) @@ -234,7 +237,7 @@ var _ = Describe("Podman network", func() { It("podman network inspect no args", func() { session := podmanTest.Podman([]string{"network", "inspect"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(125, "requires at least 1 arg(s), only received 0")) }) It("podman network inspect", func() { @@ -395,7 +398,7 @@ var _ = Describe("Podman network", func() { It("podman network remove bogus", func() { session := podmanTest.Podman([]string{"network", "rm", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "unable to find network with name or ID bogus: network not found")) }) It("podman network remove --force with pod", func() { @@ -416,7 +419,7 @@ var _ = Describe("Podman network", func() { session = podmanTest.Podman([]string{"network", "rm", netName}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(2)) + Expect(session).Should(ExitWithError(2, fmt.Sprintf(`"%s" has associated containers with it. 
Use -f to forcibly delete containers and pods: network is being used`, netName))) session = podmanTest.Podman([]string{"network", "rm", "-t", "0", "--force", netName}) session.WaitWithDefaultTimeout() @@ -425,7 +428,7 @@ var _ = Describe("Podman network", func() { // check if pod is deleted session = podmanTest.Podman([]string{"pod", "exists", podID}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) // check if net is deleted session = podmanTest.Podman([]string{"network", "ls"}) @@ -530,7 +533,7 @@ var _ = Describe("Podman network", func() { Expect(inspect).Should(ExitCleanly()) // JSON the network configuration into something usable - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -556,7 +559,7 @@ var _ = Describe("Podman network", func() { inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -584,7 +587,7 @@ var _ = Describe("Podman network", func() { inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -613,7 +616,7 @@ var _ = Describe("Podman network", func() { session = podmanTest.Podman([]string{"network", "exists", stringid.GenerateRandomID()}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("podman network create macvlan with network info and options", func() { @@ -627,7 +630,7 @@ var _ = Describe("Podman network", func() { inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(inspect.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) diff --git a/test/e2e/pause_test.go b/test/e2e/pause_test.go index 9ab0be136b..0b474b3ad3 100644 --- a/test/e2e/pause_test.go +++ b/test/e2e/pause_test.go @@ -9,7 +9,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pause", func() { @@ -37,13 +36,13 @@ var _ = Describe("Podman pause", func() { It("podman pause bogus container", func() { session := podmanTest.Podman([]string{"pause", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman unpause bogus container", func() { session := podmanTest.Podman([]string{"unpause", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman pause a created container by id", func() { @@ -55,7 +54,7 @@ var _ = Describe("Podman pause", func() { result := podmanTest.Podman([]string{"pause", cid}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, `"created" is not running, can't pause: container state improper`)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(strings.ToLower(podmanTest.GetContainerStatus())).To(ContainSubstring(createdState)) @@ -107,7 +106,7 @@ var _ = Describe("Podman pause", func() { result := podmanTest.Podman([]string{"unpause", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, fmt.Sprintf(`"%s" is not paused, can't unpause: container state improper`, cid))) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) }) @@ -128,7 +127,7 @@ var _ = Describe("Podman pause", func() { result = podmanTest.Podman([]string{"rm", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(2)) + Expect(result).Should(ExitWithError(2, fmt.Sprintf("cannot remove container %s as it is paused - running or paused containers cannot be removed without force: container state improper", cid))) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(strings.ToLower(podmanTest.GetContainerStatus())).To(ContainSubstring(pausedState)) @@ -175,7 +174,7 @@ var _ = Describe("Podman pause", func() { result = podmanTest.Podman([]string{"stop", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, fmt.Sprintf("Error: container %s is running or paused, refusing to clean up: container state improper", cid))) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(strings.ToLower(podmanTest.GetContainerStatus())).To(ContainSubstring(pausedState)) @@ -186,7 +185,7 @@ var _ = Describe("Podman pause", func() { result = podmanTest.Podman([]string{"rm", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(2)) + Expect(result).Should(ExitWithError(2, fmt.Sprintf("cannot remove container %s as it is running - running or paused containers cannot be removed without force: container state improper", cid))) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) result = podmanTest.Podman([]string{"rm", "-t", "0", "-f", cid}) @@ -321,10 +320,9 @@ var _ = Describe("Podman pause", func() { }) It("podman pause --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "cid" + cidFile := filepath.Join(tempdir, "cid") - session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, ALPINE, "top"}) + session := podmanTest.Podman([]string{"create", "--cidfile", cidFile, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid := 
session.OutputToStringArray()[0] @@ -333,13 +331,13 @@ var _ = Describe("Podman pause", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - result := podmanTest.Podman([]string{"pause", "--cidfile", tmpFile}) + result := podmanTest.Podman([]string{"pause", "--cidfile", cidFile}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() Expect(output).To(ContainSubstring(cid)) - result = podmanTest.Podman([]string{"unpause", "--cidfile", tmpFile}) + result = podmanTest.Podman([]string{"unpause", "--cidfile", cidFile}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output = result.OutputToString() @@ -347,25 +345,22 @@ var _ = Describe("Podman pause", func() { }) It("podman pause multiple --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile1 := tmpDir + "cid-1" - tmpFile2 := tmpDir + "cid-2" + cidFile1 := filepath.Join(tempdir, "cid-1") + cidFile2 := filepath.Join(tempdir, "cid-2") - defer os.RemoveAll(tmpDir) - - session := podmanTest.Podman([]string{"run", "--cidfile", tmpFile1, "-d", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "--cidfile", cidFile1, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid1 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(1)) - session = podmanTest.Podman([]string{"run", "--cidfile", tmpFile2, "-d", ALPINE, "top"}) + session = podmanTest.Podman([]string{"run", "--cidfile", cidFile2, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid2 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(2)) - result := podmanTest.Podman([]string{"pause", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + result := podmanTest.Podman([]string{"pause", "--cidfile", cidFile1, "--cidfile", cidFile2}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -373,7 +368,7 @@ var _ = Describe("Podman pause", func() { Expect(output).To(ContainSubstring(cid2)) Expect(podmanTest.NumberOfContainers()).To(Equal(2)) - result = podmanTest.Podman([]string{"unpause", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + result = podmanTest.Podman([]string{"unpause", "--cidfile", cidFile1, "--cidfile", cidFile2}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output = result.OutputToString() @@ -386,40 +381,38 @@ var _ = Describe("Podman pause", func() { SkipIfRemote("--latest flag n/a") result := podmanTest.Podman([]string{"pause", "--cidfile", "foobar", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"pause", "--cidfile", "foobar", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"pause", "--cidfile", "foobar", "--all", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot 
be used together")) + result = podmanTest.Podman([]string{"pause", "--latest", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all and --latest cannot be used together")) }) It("podman unpause invalid --latest and --cidfile and --all", func() { SkipIfRemote("--latest flag n/a") result := podmanTest.Podman([]string{"unpause", "--cidfile", "foobar", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"unpause", "--cidfile", "foobar", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"unpause", "--cidfile", "foobar", "--all", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"unpause", "--latest", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all and --latest cannot be used together")) }) It("podman pause --filter", func() { @@ -441,11 +434,11 @@ var _ = Describe("Podman pause", func() { session1 = podmanTest.Podman([]string{"pause", cid1, "-f", "status=test"}) session1.WaitWithDefaultTimeout() - Expect(session1).Should(Exit(125)) + Expect(session1).Should(ExitWithError(125, "--filter takes no arguments")) session1 = podmanTest.Podman([]string{"unpause", cid1, "-f", "status=paused"}) session1.WaitWithDefaultTimeout() - Expect(session1).Should(Exit(125)) + Expect(session1).Should(ExitWithError(125, "--filter takes no arguments")) session1 = podmanTest.Podman([]string{"pause", "-a", "--filter", "label=test=with,comma"}) session1.WaitWithDefaultTimeout() diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go index 9a1561e93c..cc051137d1 100644 --- a/test/e2e/play_kube_test.go +++ b/test/e2e/play_kube_test.go @@ -273,8 +273,11 @@ spec: - name: testctr image: ` + CITEST_IMAGE + ` command: - - sleep - - inf + - /bin/sh + - -c + - | + trap exit SIGTERM + while :; do sleep 0.1; done volumeMounts: - mountPath: /var name: testing @@ -285,6 +288,30 @@ spec: claimName: testvol ` +var signalTest = ` +apiVersion: v1 +kind: Pod +metadata: + name: testpod +spec: + containers: + - name: testctr + image: ` + CITEST_IMAGE + ` + command: + - /bin/sh + - -c + - | + trap 'echo TERMINATED > /testvol/termfile; exit' SIGTERM + while true; do sleep 0.1; done + volumeMounts: + - mountPath: /testvol + name: testvol + volumes: + - name: testvol + persistentVolumeClaim: + claimName: testvol +` + var checkInfraImagePodYaml = ` apiVersion: v1 kind: Pod @@ -2140,8 +2167,14 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"play", "kube", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) - + expect := "YAML 
document does not contain any supported kube kind" + // On anything kube-related, podman-remote emits a magic prefix + // that regular podman doesn't. Test for it here, but let's not + // do so in every single test. + if IsRemote() { + expect = "playing YAML file: " + expect + } + Expect(kube).To(ExitWithError(125, expect)) }) It("fail with custom selinux label", func() { @@ -2192,8 +2225,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("failed to create volume \"mycm\": no such ConfigMap \"mycm\"")) + Expect(kube).Should(ExitWithError(125, `failed to create volume "mycm": no such ConfigMap "mycm"`)) }) It("test HostAliases with --no-hosts", func() { @@ -2211,8 +2243,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", "--no-hosts", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("HostAliases in yaml file will not work with --no-hosts")) + Expect(kube).Should(ExitWithError(125, "HostAliases in yaml file will not work with --no-hosts")) }) It("should use customized infra_image", func() { @@ -2311,14 +2342,13 @@ var _ = Describe("Podman kube play", func() { Expect(containerNames).To(ContainElement("podnameEqualsContainerNameYaml-podnameEqualsContainerNameYaml")) }) - It("should error if pod dont have a name", func() { + It("should error if pod doesn't have a name", func() { err := writeYaml(podWithoutAName, kubeYaml) Expect(err).ToNot(HaveOccurred()) kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - + Expect(kube).Should(ExitWithError(125, "pod does not have a name")) }) It("support container liveness probe", func() { @@ -2366,7 +2396,7 @@ var _ = Describe("Podman kube play", func() { hc := podmanTest.Podman([]string{"healthcheck", "run", ctrName}) hc.WaitWithDefaultTimeout() - Expect(hc).Should(Exit(1)) + Expect(hc).Should(ExitWithError(1, "")) exec := podmanTest.Podman([]string{"exec", ctrName, "sh", "-c", "echo 'startup probe success' > /testfile"}) exec.WaitWithDefaultTimeout() @@ -2386,8 +2416,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", "--authfile", "/tmp/nonexistent", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) - + Expect(kube).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) }) It("test correct command", func() { @@ -2716,7 +2745,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml, "--configmap", cmYamlPathname}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "cannot set env FOO: key MISSING_KEY not found in configmap foo")) }) It("test required env value from missing configmap", func() { @@ -2726,7 +2755,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "cannot set env FOO: configmap missing_cm not found")) }) It("test optional env value from configmap with missing key", func() { @@ -2792,7 +2821,7 @@ var _ = Describe("Podman kube play", func() { kube := 
podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "configmap missing_cm not found")) }) It("test get all key-value pairs from optional configmap as envs", func() { @@ -2828,7 +2857,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, `cannot set env FOO: no secret with name or id "foo": no such secret`)) }) It("test required env value from secret with missing key", func() { @@ -2839,7 +2868,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "cannot set env FOO: secret foo has not MISSING key")) }) It("test optional env value from missing secret", func() { @@ -2897,7 +2926,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, `no secret with name or id "missing_secret": no such secret`)) }) It("test get all key-value pairs from optional secret as envs", func() { @@ -2918,7 +2947,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, `the pod "testPod" is invalid; duplicate container name "testctr" detected`)) p = getPod(withPodInitCtr(getCtr(withImage(CITEST_IMAGE), withCmd([]string{"echo", "hello"}), withInitCtr(), withName("initctr"))), withCtr(getCtr(withImage(CITEST_IMAGE), withName("initctr"), withCmd([]string{"top"})))) @@ -2927,7 +2956,7 @@ var _ = Describe("Podman kube play", func() { kube = podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, `adding pod to state: name "testPod" is in use: pod already exists`)) }) It("test hostname", func() { @@ -3112,8 +3141,7 @@ var _ = Describe("Podman kube play", func() { kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring(BB_GLIBC + ": image not known")) + Expect(kube).Should(ExitWithError(125, BB_GLIBC+": image not known")) }) It("with pull policy of missing", func() { @@ -3191,6 +3219,46 @@ var _ = Describe("Podman kube play", func() { Expect(oldBBinspect[0].Digest).To(Not(Equal(newBBinspect[0].Digest))) }) + It("with no tag and no pull policy should always pull", func() { + oldBB := "quay.io/libpod/busybox:1.30.1" + pull := podmanTest.Podman([]string{"pull", oldBB}) + pull.WaitWithDefaultTimeout() + Expect(pull).Should(Exit(0)) + + tag := podmanTest.Podman([]string{"tag", oldBB, BB}) + tag.WaitWithDefaultTimeout() + Expect(tag).Should(ExitCleanly()) + + rmi := podmanTest.Podman([]string{"rmi", oldBB}) + rmi.WaitWithDefaultTimeout() + Expect(rmi).Should(ExitCleanly()) + + inspect := podmanTest.Podman([]string{"inspect", BB}) + inspect.WaitWithDefaultTimeout() + Expect(inspect).Should(ExitCleanly()) + oldBBinspect := inspect.InspectImageJSON() + + noTagBB := "quay.io/libpod/busybox" + ctr := getCtr(withImage(noTagBB), withPullPolicy("")) + err := generateKubeYaml("pod", 
getPod(withCtr(ctr)), kubeYaml) + Expect(err).ToNot(HaveOccurred()) + + kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) + kube.WaitWithDefaultTimeout() + Expect(kube).Should(Exit(0)) + if IsRemote() { + Expect(kube.ErrorToString()).To(BeEmpty()) + } else { + Expect(kube.ErrorToString()).To(ContainSubstring("Copying blob ")) + } + + inspect = podmanTest.Podman([]string{"inspect", noTagBB}) + inspect.WaitWithDefaultTimeout() + Expect(inspect).Should(ExitCleanly()) + newBBinspect := inspect.InspectImageJSON() + Expect(oldBBinspect[0].Digest).To(Not(Equal(newBBinspect[0].Digest))) + }) + It("with image data", func() { testyaml := ` apiVersion: v1 @@ -3228,7 +3296,7 @@ spec: Expect(ctr[0].Config.WorkingDir).To(ContainSubstring("/etc")) Expect(ctr[0].Config.Labels).To(HaveKeyWithValue("key1", ContainSubstring("value1"))) Expect(ctr[0].Config.Labels).To(HaveKeyWithValue("key1", ContainSubstring("value1"))) - Expect(ctr[0].Config).To(HaveField("StopSignal", uint(51))) + Expect(ctr[0].Config).To(HaveField("StopSignal", "SIGRTMAX-13")) }) It("daemonset sanity", func() { @@ -3399,7 +3467,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": in parsing HostPath in YAML: faccessat %s: no such file or directory`, hostPathLocation))) Expect(kube.ErrorToString()).To(ContainSubstring(defaultVolName)) }) @@ -3427,7 +3495,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": in parsing HostPath in YAML: faccessat %s: no such file or directory`, hostPathLocation))) }) It("test with File HostPath type volume", func() { @@ -3520,7 +3588,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": checking HostPathSocket: path %s is not a socket`, hostPathLocation))) }) It("test with read-only HostPath volume", func() { @@ -3584,7 +3652,7 @@ VOLUME %s`, CITEST_IMAGE, hostPathDir+"/") kube.WaitWithDefaultTimeout() Expect(kube).Should(ExitCleanly()) - result := podmanTest.Podman([]string{"exec", getCtrNameInPod(pod), "ls", hostPathDir + "/" + testfile}) + result := podmanTest.Podman([]string{"exec", getCtrNameInPod(pod), "ls", filepath.Join(hostPathDir, testfile)}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) @@ -3939,8 +4007,7 @@ MemoryReservation: {{ .HostConfig.MemoryReservation }}`}) kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("invalid reference format")) + Expect(kube).Should(ExitWithError(125, "invalid reference format")) }) It("applies log driver to containers", func() { @@ -4053,7 +4120,7 @@ o: {{ .Options.o }}`}) It("persistentVolumeClaim with source", func() { fileName := "data" expectedFileContent := "Test" - tarFilePath := filepath.Join(os.TempDir(), "podmanVolumeSource.tgz") + tarFilePath := filepath.Join(podmanTest.TempDir, "podmanVolumeSource.tgz") err := createSourceTarFile(fileName, expectedFileContent, tarFilePath) Expect(err).ToNot(HaveOccurred()) @@ -4067,8 +4134,7 @@ o: {{ .Options.o }}`}) kube := podmanTest.Podman([]string{"kube", 
"play", kubeYaml}) kube.WaitWithDefaultTimeout() if IsRemote() { - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("importing volumes is not supported for remote requests")) + Expect(kube).Should(ExitWithError(125, "importing volumes is not supported for remote requests")) return } Expect(kube).Should(ExitCleanly()) @@ -4283,7 +4349,7 @@ invalid kube kind kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "multi doc yaml could not be split: yaml: line 12: found character that cannot start any token")) }) It("with auto update annotations for all containers", func() { @@ -4472,7 +4538,7 @@ invalid kube kind // volume should not be deleted on teardown exists = podmanTest.Podman([]string{"volume", "exists", volName}) exists.WaitWithDefaultTimeout() - Expect(exists).To(Exit(1)) + Expect(exists).To(ExitWithError(1, "")) }) It("after teardown with volume reuse", func() { @@ -4709,7 +4775,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "cannot set env FOO: key MISSING_KEY not found in configmap foo")) }) It("succeeds for optional env value with missing key", func() { @@ -4933,8 +4999,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q kube := podmanTest.Podman([]string{"kube", "play", kubeYaml, "--configmap", fsCmYamlPathname}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("ambiguous configuration: the same config map foo is present in YAML and in --configmaps")) + Expect(kube).Should(ExitWithError(125, "ambiguous configuration: the same config map foo is present in YAML and in --configmaps")) }) }) @@ -5139,7 +5204,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) + Expect(kube).Should(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": checking HostPathBlockDevice: stat %s: no such file or directory`, devicePath))) }) It("reports error when we try to expose char device as block device", func() { @@ -5165,7 +5230,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) + Expect(kube).Should(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": checking HostPathDevice: path %s is not a block device`, devicePath))) }) It("reports error when we try to expose block device as char device", func() { @@ -5190,7 +5255,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) + Expect(kube).Should(ExitWithError(125, fmt.Sprintf(`failed to create volume "testVol": checking HostPathCharDevice: path %s is not a character device`, devicePath))) }) It("secret as volume support - simple", func() { @@ -5444,7 +5509,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", "--quiet", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "rootlessport cannot expose privileged port 80,")) // The ugly format-error exited once in Podman. The test makes // sure it's not coming back. 
Expect(kube.ErrorToString()).To(Not(ContainSubstring("Error: %!s()"))) @@ -5468,10 +5533,13 @@ spec: name: vol-test-3 ` + err = writeYaml(podTemplate, kubeYaml) + Expect(err).ToNot(HaveOccurred()) + // the image is incorrect so the kube play will fail, but it will clean up the pod that was created for it before the failure happened - kube := podmanTest.Podman([]string{"kube", "play", podTemplate}) + kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).To(ExitWithError()) + Expect(kube).To(ExitWithError(125, "multi doc yaml could not be split: yaml: line 5: found character that cannot start any token")) ps := podmanTest.Podman([]string{"pod", "ps", "-q"}) ps.WaitWithDefaultTimeout() @@ -5528,6 +5596,28 @@ spec: Expect(checkVol.OutputToString()).To(Equal("testvol1")) }) + It("with graceful shutdown", func() { + + volumeCreate := podmanTest.Podman([]string{"volume", "create", "testvol"}) + volumeCreate.WaitWithDefaultTimeout() + Expect(volumeCreate).Should(ExitCleanly()) + + err = writeYaml(signalTest, kubeYaml) + Expect(err).ToNot(HaveOccurred()) + + playKube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) + playKube.WaitWithDefaultTimeout() + Expect(playKube).Should(ExitCleanly()) + + teardown := podmanTest.Podman([]string{"kube", "down", kubeYaml}) + teardown.WaitWithDefaultTimeout() + Expect(teardown).Should(ExitCleanly()) + + session := podmanTest.Podman([]string{"run", "--volume", "testvol:/testvol", CITEST_IMAGE, "sh", "-c", "cat /testvol/termfile"}) + session.WaitWithDefaultTimeout() + Expect(session.OutputToString()).Should(ContainSubstring("TERMINATED")) + }) + It("with hostPath subpaths", func() { if !Containerized() { Skip("something is wrong with file permissions in CI or in the yaml creation. cannot ls or cat the fs unless in a container") @@ -5585,14 +5675,10 @@ spec: playKube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) playKube.WaitWithDefaultTimeout() - Expect(playKube).Should(Exit(125)) - Expect(playKube.OutputToString()).Should(ContainSubstring("is outside")) + Expect(playKube).Should(ExitWithError(125, fmt.Sprintf(`subpath "testing/onlythis" is outside of the volume "%s/root/volumes/testvol/_data`, podmanTest.TempDir))) }) It("with unsafe hostPath subpaths", func() { - if !Containerized() { - Skip("something is wrong with file permissions in CI or in the yaml creation. 
cannot ls or cat the fs unless in a container") - } hostPathLocation := podmanTest.TempDir Expect(os.MkdirAll(filepath.Join(hostPathLocation, "testing"), 0755)).To(Succeed()) @@ -5605,8 +5691,7 @@ spec: playKube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) playKube.WaitWithDefaultTimeout() - Expect(playKube).Should(Exit(125)) - Expect(playKube.OutputToString()).Should(ContainSubstring("is outside")) + Expect(playKube).Should(ExitWithError(125, fmt.Sprintf(`subpath "testing/symlink" is outside of the volume "%s"`, hostPathLocation))) }) It("with configMap subpaths", func() { @@ -5652,7 +5737,7 @@ spec: curlTest := podmanTest.Podman([]string{"run", "--network", "host", NGINX_IMAGE, "curl", "-s", "localhost:19000"}) curlTest.WaitWithDefaultTimeout() - Expect(curlTest).Should(Exit(7)) + Expect(curlTest).Should(ExitWithError(7, "")) }) It("without Ports, publish in command line - curl should succeed", func() { @@ -5673,11 +5758,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", "--publish-all=true", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - // The error message is printed only on local call - if !IsRemote() { - Expect(kube.OutputToString()).Should(ContainSubstring("rootlessport cannot expose privileged port 80")) - } + Expect(kube).Should(ExitWithError(125, "rootlessport cannot expose privileged port 80")) }) It("podman play kube should not publish containerPort by default", func() { @@ -5757,7 +5838,7 @@ spec: kube.WaitWithDefaultTimeout() Expect(kube).Should(ExitCleanly()) - verifyPodPorts(podmanTest, "network-echo", "19008/tcp:[{ 19010}]", "19008/udp:[{ 19009}]") + verifyPodPorts(podmanTest, "network-echo", "19008/tcp:[{0.0.0.0 19010}]", "19008/udp:[{0.0.0.0 19009}]") }) It("override with udp should keep tcp from YAML file", func() { @@ -5768,7 +5849,7 @@ spec: kube.WaitWithDefaultTimeout() Expect(kube).Should(ExitCleanly()) - verifyPodPorts(podmanTest, "network-echo", "19008/tcp:[{ 19011}]", "19008/udp:[{ 19012}]") + verifyPodPorts(podmanTest, "network-echo", "19008/tcp:[{0.0.0.0 19011}]", "19008/udp:[{0.0.0.0 19012}]") }) It("with replicas limits the count to 1 and emits a warning", func() { @@ -5893,48 +5974,31 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("since Network Namespace set to host: invalid argument")) + Expect(kube).Should(ExitWithError(125, "since Network Namespace set to host: invalid argument")) }) - It("test with --no-trunc", func() { - ctrName := "demo" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) + It("test with annotation size beyond limits", func() { + key := "name" + val := RandomString(define.TotalAnnotationSizeLimitB - len(key) + 1) + pod := getPod(withAnnotation(key, val)) + err := generateKubeYaml("pod", pod, kubeYaml) Expect(err).ToNot(HaveOccurred()) - session := podmanTest.Podman([]string{"run", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - file := filepath.Join(podmanTest.TempDir, ctrName+".yml") - session = podmanTest.Podman([]string{"kube", "generate", "--no-trunc", "-f", file, ctrName}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"kube", "play", "--no-trunc", file}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) + kube := 
podmanTest.Podman([]string{"kube", "play", kubeYaml}) + kube.WaitWithDefaultTimeout() + Expect(kube).Should(ExitWithError(125, "annotations size "+strconv.Itoa(len(key+val))+" is larger than limit "+strconv.Itoa(define.TotalAnnotationSizeLimitB))) }) - It("test with long annotation", func() { - ctrName := "demo" - vol1 := filepath.Join(podmanTest.TempDir, RandomString(99)) - err := os.MkdirAll(vol1, 0755) + It("test with annotation size within limits", func() { + key := "name" + val := RandomString(define.TotalAnnotationSizeLimitB - len(key)) + pod := getPod(withAnnotation(key, val)) + err := generateKubeYaml("pod", pod, kubeYaml) Expect(err).ToNot(HaveOccurred()) - session := podmanTest.Podman([]string{"run", "-v", vol1 + ":/tmp/foo:Z", "--name", ctrName, CITEST_IMAGE}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - file := filepath.Join(podmanTest.TempDir, ctrName+".yml") - session = podmanTest.Podman([]string{"kube", "generate", "--no-trunc", "-f", file, ctrName}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"kube", "play", file}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) + kube.WaitWithDefaultTimeout() + Expect(kube).Should(ExitCleanly()) }) It("test pod with volumes-from annotation in yaml", func() { @@ -6084,8 +6148,7 @@ spec: kube := podmanTest.Podman([]string{"kube", "play", kubeYaml}) kube.WaitWithDefaultTimeout() - Expect(kube).Should(Exit(125)) - Expect(kube.ErrorToString()).To(ContainSubstring("annotation " + define.VolumesFromAnnotation + " without target volume is reserved for internal use")) + Expect(kube).Should(ExitWithError(125, "annotation "+define.VolumesFromAnnotation+" without target volume is reserved for internal use")) }) It("test with reserved autoremove annotation in yaml", func() { diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go index b6b579ba72..c5d7ff6bb6 100644 --- a/test/e2e/pod_create_test.go +++ b/test/e2e/pod_create_test.go @@ -16,7 +16,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" "github.com/opencontainers/selinux/go-selinux" ) @@ -68,7 +67,7 @@ var _ = Describe("Podman pod create", func() { Expect(webserver).Should(ExitCleanly()) check := SystemExec("nc", []string{"-z", "localhost", "80"}) - Expect(check).Should(Exit(1)) + Expect(check).Should(ExitWithError(1, "")) }) It("podman create pod with network portbindings", func() { @@ -103,7 +102,7 @@ var _ = Describe("Podman pod create", func() { name := "test" session := podmanTest.Podman([]string{"pod", "create", "--infra=false", "--name", name, "-p", "80:80"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "you must have an infra container to publish port bindings to the host")) }) It("podman create pod with --no-hosts", func() { @@ -126,7 +125,7 @@ var _ = Describe("Podman pod create", func() { name := "test" podCreate := podmanTest.Podman([]string{"pod", "create", "--no-hosts", "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "cannot specify --no-hosts without an infra container")) }) It("podman create pod with --add-host", func() { @@ -145,7 +144,7 @@ var _ = Describe("Podman pod create", func() { name := "test" podCreate := podmanTest.Podman([]string{"pod", "create", "--add-host", "test.example.com:12.34.56.78", "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "NoInfra and HostAdd are mutually exclusive pod options: invalid pod spec")) }) It("podman create pod with DNS server set", func() { @@ -166,7 +165,7 @@ var _ = Describe("Podman pod create", func() { server := "12.34.56.78" podCreate := podmanTest.Podman([]string{"pod", "create", "--dns", server, "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "NoInfra and DNSServer are mutually exclusive pod options: invalid pod spec")) }) It("podman create pod with DNS option set", func() { @@ -187,7 +186,7 @@ var _ = Describe("Podman pod create", func() { option := "attempts:5" podCreate := podmanTest.Podman([]string{"pod", "create", "--dns-opt", option, "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "NoInfra and DNSOption are mutually exclusive pod options: invalid pod spec")) }) It("podman create pod with DNS search domain set", func() { @@ -208,7 +207,7 @@ var _ = Describe("Podman pod create", func() { search := "example.com" podCreate := podmanTest.Podman([]string{"pod", "create", "--dns-search", search, "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "NoInfo and DNSSearch are mutually exclusive pod options: invalid pod spec")) }) It("podman create pod with IP address", func() { @@ -218,7 +217,7 @@ var _ = Describe("Podman pod create", func() { podCreate.WaitWithDefaultTimeout() // Rootless should error without network if isRootless() { - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "invalid config provided: networks and static ip/mac address can only be used with Bridge mode networking")) } else { Expect(podCreate).Should(ExitCleanly()) podResolvConf := podmanTest.Podman([]string{"run", "--pod", name, "--rm", ALPINE, 
"ip", "addr"}) @@ -251,7 +250,7 @@ var _ = Describe("Podman pod create", func() { ip := GetSafeIPAddress() podCreate := podmanTest.Podman([]string{"pod", "create", "--ip", ip, "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "cannot set --ip without infra container: invalid argument")) }) It("podman create pod with MAC address", func() { @@ -261,7 +260,7 @@ var _ = Describe("Podman pod create", func() { podCreate.WaitWithDefaultTimeout() // Rootless should error if isRootless() { - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "invalid config provided: networks and static ip/mac address can only be used with Bridge mode networking")) } else { Expect(podCreate).Should(ExitCleanly()) podResolvConf := podmanTest.Podman([]string{"run", "--pod", name, "--rm", ALPINE, "ip", "addr"}) @@ -276,7 +275,7 @@ var _ = Describe("Podman pod create", func() { mac := "92:d0:c6:0a:29:35" podCreate := podmanTest.Podman([]string{"pod", "create", "--mac-address", mac, "--name", name, "--infra=false"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "cannot set --mac without infra container: invalid argument")) }) It("podman create pod and print id to external file", func() { @@ -302,9 +301,9 @@ var _ = Describe("Podman pod create", func() { It("podman pod create --replace", func() { // Make sure we error out with --name. - session := podmanTest.Podman([]string{"pod", "create", "--replace", ALPINE, "/bin/sh"}) + session := podmanTest.Podman([]string{"pod", "create", "--replace"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "cannot replace pod without --name being set")) // Create and replace 5 times in a row the "same" pod. 
podName := "testCtr" @@ -329,7 +328,7 @@ var _ = Describe("Podman pod create", func() { check1 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Entrypoint}}", data.Containers[0].ID}) check1.WaitWithDefaultTimeout() Expect(check1).Should(ExitCleanly()) - Expect(check1.OutputToString()).To(Equal("/catatonit -P")) + Expect(check1.OutputToString()).To(Equal("[/catatonit -P]")) // check the Path and Args check2 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Path}}:{{.Args}}", data.Containers[0].ID}) @@ -352,7 +351,7 @@ var _ = Describe("Podman pod create", func() { check1 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Entrypoint}}", data.Containers[0].ID}) check1.WaitWithDefaultTimeout() Expect(check1).Should(ExitCleanly()) - Expect(check1.OutputToString()).To(Equal("/pause1")) + Expect(check1.OutputToString()).To(Equal("[/pause1]")) // check the Path and Args check2 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Path}}:{{.Args}}", data.Containers[0].ID}) @@ -379,7 +378,7 @@ entrypoint ["/fromimage"] check1 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Entrypoint}}", data.Containers[0].ID}) check1.WaitWithDefaultTimeout() Expect(check1).Should(ExitCleanly()) - Expect(check1.OutputToString()).To(Equal("/fromimage")) + Expect(check1.OutputToString()).To(Equal("[/fromimage]")) // check the Path and Args check2 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Path}}:{{.Args}}", data.Containers[0].ID}) @@ -406,7 +405,7 @@ entrypoint ["/fromimage"] check1 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Entrypoint}}", data.Containers[0].ID}) check1.WaitWithDefaultTimeout() Expect(check1).Should(ExitCleanly()) - Expect(check1.OutputToString()).To(Equal("/fromcommand")) + Expect(check1.OutputToString()).To(Equal("[/fromcommand]")) // check the Path and Args check2 := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Path}}:{{.Args}}", data.Containers[0].ID}) @@ -460,7 +459,7 @@ entrypoint ["/fromimage"] It("podman create with unsupported network options", func() { podCreate := podmanTest.Podman([]string{"pod", "create", "--network", "container:doesnotmatter"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(Exit(125)) + Expect(podCreate).Should(ExitWithError(125, "pods presently do not support network mode container")) Expect(podCreate.ErrorToString()).To(ContainSubstring("pods presently do not support network mode container")) }) @@ -586,7 +585,7 @@ ENTRYPOINT ["sleep","99999"] podCreate = podmanTest.Podman([]string{"pod", "create", "--pid", ns, "--name", podName, "--share", "pid"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(ExitWithError()) + Expect(podCreate).Should(ExitWithError(125, "cannot use pod namespace as container is not joining a pod or pod has no infra container: invalid argument")) podName = "pidPod3" ns = "host" @@ -619,7 +618,13 @@ ENTRYPOINT ["sleep","99999"] podCreate = podmanTest.Podman([]string{"pod", "create", "--pid", ns, "--name", podName, "--share", "pid"}) podCreate.WaitWithDefaultTimeout() - Expect(podCreate).Should(ExitWithError()) + // This can fail in two ways, depending on intricate SELinux specifics: + // There are actually two different failure messages: + // container "randomfakeid" not found: no container with name ... + // looking up container to share pid namespace with: no container with name ... 
+ // Too complicated to differentiate in test context, so we ignore the first part + // and just check for the "no container" substring, which is common to both. + Expect(podCreate).Should(ExitWithError(125, `no container with name or ID "randomfakeid" found: no such container`)) }) @@ -656,7 +661,7 @@ ENTRYPOINT ["sleep","99999"] // fail if --pod and --userns set together session = podmanTest.Podman([]string{"run", "--pod", podName, "--userns", "keep-id", ALPINE, "id", "-u"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "--userns and --pod cannot be set together")) }) It("podman pod create with --userns=keep-id can add users", func() { diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go index f88733e607..adc0303b7d 100644 --- a/test/e2e/pod_infra_container_test.go +++ b/test/e2e/pod_infra_container_test.go @@ -1,6 +1,7 @@ package integration import ( + "fmt" "strconv" . "github.com/containers/podman/v5/test/utils" @@ -101,7 +102,7 @@ var _ = Describe("Podman pod create", func() { session = podmanTest.Podman([]string{"run", fedoraMinimal, "curl", "-f", "localhost"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(7, "Failed to connect to localhost port 80 ")) session = podmanTest.Podman([]string{"pod", "create", "--network", "host"}) session.WaitWithDefaultTimeout() @@ -220,7 +221,7 @@ var _ = Describe("Podman pod create", func() { session = podmanTest.Podman([]string{"run", "--pod", podID, "--network", "bridge", NGINX_IMAGE, "curl", "-f", "localhost"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(7, "Failed to connect to localhost port 80 ")) }) It("podman pod container can override pod pid NS", func() { @@ -311,14 +312,14 @@ var _ = Describe("Podman pod create", func() { Expect(session).Should(ExitCleanly()) podID := session.OutputToString() - session = podmanTest.Podman([]string{"ps", "-aq"}) + session = podmanTest.Podman([]string{"ps", "-aq", "--no-trunc"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) infraID := session.OutputToString() session = podmanTest.Podman([]string{"rm", infraID}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, fmt.Sprintf("container %s is the infra container of pod %s and cannot be removed without removing the pod", infraID, podID))) session = podmanTest.Podman([]string{"pod", "rm", podID}) session.WaitWithDefaultTimeout() @@ -384,8 +385,7 @@ var _ = Describe("Podman pod create", func() { session = podmanTest.Podman([]string{"create", "--pod", podID, "--add-host", "foobar:127.0.0.1", ALPINE, "ping", "-c", "1", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("extra host entries must be specified on the pod: network cannot be configured when it is shared with a pod")) + Expect(session).Should(ExitWithError(125, "extra host entries must be specified on the pod: network cannot be configured when it is shared with a pod")) // verify we can see the pods hosts session = podmanTest.Podman([]string{"run", "--cap-add", "net_raw", "--pod", podID, ALPINE, "ping", "-c", "1", "host1"}) diff --git a/test/e2e/pod_initcontainers_test.go b/test/e2e/pod_initcontainers_test.go index 8c0e79b15a..5aa291ed6f 100644 --- a/test/e2e/pod_initcontainers_test.go +++ 
b/test/e2e/pod_initcontainers_test.go @@ -8,7 +8,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman init containers", func() { @@ -16,13 +15,13 @@ var _ = Describe("Podman init containers", func() { It("podman create init container without --pod should fail", func() { session := podmanTest.Podman([]string{"create", "--init-ctr", "always", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "must specify pod value with init-ctr")) }) It("podman create init container with bad init type should fail", func() { session := podmanTest.Podman([]string{"create", "--init-ctr", "unknown", "--pod", "new:foobar", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "init-ctr value must be 'always' or 'once'")) }) It("podman init containers should not degrade pod status", func() { @@ -54,7 +53,7 @@ var _ = Describe("Podman init containers", func() { // adding init-ctr to running pod should fail session := podmanTest.Podman([]string{"create", "--init-ctr", "always", "--pod", "foobar", ALPINE, "date"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "cannot add init-ctr to a running pod")) }) It("podman make sure init container runs before pod containers", func() { @@ -91,8 +90,8 @@ var _ = Describe("Podman init containers", func() { check := podmanTest.Podman([]string{"container", "exists", initContainerID}) check.WaitWithDefaultTimeout() // Container was rm'd - // Expect(check).Should(Exit(1)) - Expect(check.ExitCode()).To(Equal(1), "I dont understand why the other way does not work") + Expect(check).To(ExitWithError(1, "")) + // Let's double check with a stop and start podmanTest.StopPod("foobar") startPod := podmanTest.Podman([]string{"pod", "start", "foobar"}) @@ -102,7 +101,7 @@ var _ = Describe("Podman init containers", func() { // Because no init was run, the file should not even exist doubleCheck := podmanTest.Podman([]string{"exec", verify.OutputToString(), "cat", filename}) doubleCheck.WaitWithDefaultTimeout() - Expect(doubleCheck).Should(Exit(1)) + Expect(doubleCheck).Should(ExitWithError(1, fmt.Sprintf("cat: can't open '%s': No such file or directory", filename))) }) diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go index 64a79dc5ba..8a17a59f2d 100644 --- a/test/e2e/pod_inspect_test.go +++ b/test/e2e/pod_inspect_test.go @@ -11,7 +11,11 @@ var _ = Describe("Podman pod inspect", func() { It("podman inspect bogus pod", func() { session := podmanTest.Podman([]string{"pod", "inspect", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + expect := "no such pod foobar" + if IsRemote() { + expect = `no such pod "foobar"` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman inspect a pod", func() { diff --git a/test/e2e/pod_kill_test.go b/test/e2e/pod_kill_test.go index c63377202e..7aa1c88cb1 100644 --- a/test/e2e/pod_kill_test.go +++ b/test/e2e/pod_kill_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod kill", func() { @@ -12,7 +11,11 @@ var _ = Describe("Podman pod kill", func() { It("podman pod kill bogus", func() { session := podmanTest.Podman([]string{"pod", "kill", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + expect := "no pod with name or ID foobar found: no such pod" + if IsRemote() { + expect = `unable to find pod "foobar": no such pod` + } + Expect(session).To(ExitWithError(125, expect)) }) It("podman pod kill a pod by id", func() { @@ -71,7 +74,7 @@ var _ = Describe("Podman pod kill", func() { result := podmanTest.Podman([]string{"pod", "kill", "-s", "bogus", "test1"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "invalid signal: bogus")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) }) diff --git a/test/e2e/pod_pause_test.go b/test/e2e/pod_pause_test.go index adb173c441..b4e7c05a84 100644 --- a/test/e2e/pod_pause_test.go +++ b/test/e2e/pod_pause_test.go @@ -16,13 +16,21 @@ var _ = Describe("Podman pod pause", func() { It("podman pod pause bogus pod", func() { session := podmanTest.Podman([]string{"pod", "pause", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + expect := "no pod with name or ID foobar found: no such pod" + if IsRemote() { + expect = `unable to find pod "foobar": no such pod` + } + Expect(session).To(ExitWithError(125, expect)) }) It("podman unpause bogus pod", func() { session := podmanTest.Podman([]string{"pod", "unpause", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + expect := "no pod with name or ID foobar found: no such pod" + if IsRemote() { + expect = `unable to find pod "foobar": no such pod` + } + Expect(session).To(ExitWithError(125, expect)) }) It("podman pod pause a created pod by id", func() { diff --git a/test/e2e/pod_ps_test.go b/test/e2e/pod_ps_test.go index 0a223d8832..e347fd5757 100644 --- a/test/e2e/pod_ps_test.go +++ b/test/e2e/pod_ps_test.go @@ -130,7 +130,7 @@ var _ = Describe("Podman ps", func() { It("podman pod ps mutually exclusive flags", func() { session := podmanTest.Podman([]string{"pod", "ps", "-q", "--format", "{{.ID}}"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "quiet and format cannot be used together")) }) diff --git a/test/e2e/pod_restart_test.go b/test/e2e/pod_restart_test.go index 839b5cc764..8a07533872 100644 --- a/test/e2e/pod_restart_test.go +++ b/test/e2e/pod_restart_test.go @@ -1,10 +1,11 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod restart", func() { @@ -12,7 +13,11 @@ var _ = Describe("Podman pod restart", func() { It("podman pod restart bogus pod", func() { session := podmanTest.Podman([]string{"pod", "restart", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID 123 found: no such pod" + if IsRemote() { + expect = `unable to find pod "123": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman pod restart single empty pod", func() { @@ -21,7 +26,7 @@ var _ = Describe("Podman pod restart", func() { session := podmanTest.Podman([]string{"pod", "restart", podid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, fmt.Sprintf("no containers in pod %s have no dependencies, cannot start pod: no such container", podid))) }) It("podman pod restart single pod by name", func() { @@ -152,6 +157,10 @@ var _ = Describe("Podman pod restart", func() { session = podmanTest.Podman([]string{"pod", "restart", podid1, "doesnotexist"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID doesnotexist found: no such pod" + if IsRemote() { + expect = `unable to find pod "doesnotexist": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) }) diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go index f7d15c5acc..cc00d68f98 100644 --- a/test/e2e/pod_rm_test.go +++ b/test/e2e/pod_rm_test.go @@ -11,7 +11,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod rm", func() { @@ -122,7 +121,7 @@ var _ = Describe("Podman pod rm", func() { GinkgoWriter.Printf("Removing all empty pods\n") result := podmanTest.Podman([]string{"pod", "rm", "-a"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "it is running - running or paused containers cannot be removed without force: container state improper")) Expect(result.ErrorToString()).To(ContainSubstring("not all containers could be removed from pod")) numPods = podmanTest.NumberOfPods() @@ -176,7 +175,11 @@ var _ = Describe("Podman pod rm", func() { It("podman rm bogus pod", func() { session := podmanTest.Podman([]string{"pod", "rm", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + expect := "no pod with name or ID bogus found: no such pod" + if IsRemote() { + expect = `unable to find pod "bogus": no such pod` + } + Expect(session).Should(ExitWithError(1, expect)) }) It("podman rm bogus pod and a running pod", func() { @@ -189,11 +192,23 @@ var _ = Describe("Podman pod rm", func() { session = podmanTest.Podman([]string{"pod", "rm", "bogus", "test1"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + expect := "no pod with name or ID bogus found: no such pod" + if IsRemote() { + expect = `unable to find pod "bogus": no such pod` + } + Expect(session).Should(ExitWithError(1, expect)) session = podmanTest.Podman([]string{"pod", "rm", "test1", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + // FIXME-someday: consolidate different error messages + expect = "no pod with name or ID test1 found" + if podmanTest.DatabaseBackend == "boltdb" { + expect = "test1 is a container, not a pod" + } + if IsRemote() { + expect = `unable to find pod "test1"` + } + 
Expect(session).Should(ExitWithError(1, expect+": no such pod")) }) It("podman rm --ignore bogus pod and a running pod", func() { @@ -215,13 +230,12 @@ var _ = Describe("Podman pod rm", func() { }) It("podman pod start/remove single pod via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "podID" + podIDFile := filepath.Join(tempdir, "podID") podName := "rudolph" // Create a pod with --pod-id-file. - session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -230,26 +244,24 @@ var _ = Describe("Podman pod rm", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile}) + session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top - session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", "--pod-id-file", tmpFile, "--force"}) + session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", "--pod-id-file", podIDFile, "--force"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) }) It("podman pod start/remove multiple pods via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - podIDFiles := []string{} for _, i := range "0123456789" { - tmpFile := tmpDir + "cid" + string(i) + cidFile := filepath.Join(tempdir, "cid"+string(i)) podName := "rudolph" + string(i) // Create a pod with --pod-id-file. - session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", cidFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -260,7 +272,7 @@ var _ = Describe("Podman pod rm", func() { // Append the id files along with the command. podIDFiles = append(podIDFiles, "--pod-id-file") - podIDFiles = append(podIDFiles, tmpFile) + podIDFiles = append(podIDFiles, cidFile) } cmd := []string{"pod", "start"} diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go index f7ca98c63e..640ff48462 100644 --- a/test/e2e/pod_start_test.go +++ b/test/e2e/pod_start_test.go @@ -3,21 +3,24 @@ package integration import ( "fmt" "os" + "path/filepath" "strconv" "strings" . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod start", func() { - It("podman pod start bogus pod", func() { session := podmanTest.Podman([]string{"pod", "start", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID 123 found: no such pod" + if IsRemote() { + expect = `unable to find pod "123": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman pod start single empty pod", func() { @@ -26,20 +29,22 @@ var _ = Describe("Podman pod start", func() { session := podmanTest.Podman([]string{"pod", "start", podid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, fmt.Sprintf("no containers in pod %s have no dependencies, cannot start pod: no such container", podid))) }) It("podman pod start single pod by name", func() { - _, ec, _ := podmanTest.CreatePod(map[string][]string{"--name": {"foobar99"}}) + name := "foobar99" + _, ec, _ := podmanTest.CreatePod(map[string][]string{"--name": {name}}) Expect(ec).To(Equal(0)) - session := podmanTest.Podman([]string{"create", "--pod", "foobar99", ALPINE, "ls"}) + session := podmanTest.Podman([]string{"create", "--pod", name, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"pod", "start", "foobar99"}) + session = podmanTest.Podman([]string{"pod", "start", name}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).Should(ContainSubstring(name)) }) It("podman pod start multiple pods", func() { @@ -61,6 +66,8 @@ var _ = Describe("Podman pod start", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) + Expect(session.OutputToString()).Should(ContainSubstring("foobar99")) + Expect(session.OutputToString()).Should(ContainSubstring("foobar100")) }) It("multiple pods in conflict", func() { @@ -90,7 +97,8 @@ var _ = Describe("Podman pod start", func() { session = podmanTest.Podman([]string{"pod", "start", podid1, podid2}) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(125)) + // Different network backends emit different messages; check only the common part + Expect(session).To(ExitWithError(125, "ddress already in use")) }) It("podman pod start all pods", func() { @@ -149,17 +157,20 @@ var _ = Describe("Podman pod start", func() { session = podmanTest.Podman([]string{"pod", "start", podid, "doesnotexist"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID doesnotexist found: no such pod" + if IsRemote() { + expect = `unable to find pod "doesnotexist": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman pod start single pod via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "podID" + podIDFile := filepath.Join(tempdir, "podID") podName := "rudolph" // Create a pod with --pod-id-file. 
- session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -168,21 +179,19 @@ var _ = Describe("Podman pod start", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile}) + session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top }) It("podman pod start multiple pods via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - podIDFiles := []string{} for _, i := range "0123456789" { - tmpFile := tmpDir + "cid" + string(i) + cidFile := filepath.Join(tempdir, "cid"+string(i)) podName := "rudolph" + string(i) // Create a pod with --pod-id-file. - session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", cidFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -193,7 +202,7 @@ var _ = Describe("Podman pod start", func() { // Append the id files along with the command. podIDFiles = append(podIDFiles, "--pod-id-file") - podIDFiles = append(podIDFiles, tmpFile) + podIDFiles = append(podIDFiles, cidFile) } cmd := []string{"pod", "start"} @@ -205,12 +214,11 @@ var _ = Describe("Podman pod start", func() { }) It("podman pod create --infra-conmon-pod create + start", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "podID" + pidFile := filepath.Join(tempdir, "podID") podName := "rudolph" // Create a pod with --infra-conmon-pid. - session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--infra-conmon-pidfile", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--infra-conmon-pidfile", pidFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -227,12 +235,11 @@ var _ = Describe("Podman pod start", func() { // Read the infra-conmon-pidfile and perform some sanity checks // on the pid. - infraConmonPID := readFirstLine(tmpFile) + infraConmonPID := readFirstLine(pidFile) _, err = strconv.Atoi(infraConmonPID) // Make sure it's a proper integer Expect(err).ToNot(HaveOccurred()) cmdline := readFirstLine(fmt.Sprintf("/proc/%s/cmdline", infraConmonPID)) Expect(cmdline).To(ContainSubstring("/conmon")) }) - }) diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go index a9dd5e6f1c..6dd760d1bb 100644 --- a/test/e2e/pod_stats_test.go +++ b/test/e2e/pod_stats_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod stats", func() { @@ -25,7 +24,7 @@ var _ = Describe("Podman pod stats", func() { It("podman pod stats with a bogus pod", func() { session := podmanTest.Podman([]string{"pod", "stats", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "unable to get list of pods: no pod with name or ID foobar found: no such pod")) }) It("podman pod stats on a specific running pod", func() { @@ -151,7 +150,7 @@ var _ = Describe("Podman pod stats", func() { Expect(session).Should(ExitCleanly()) stats := podmanTest.Podman([]string{"pod", "stats", "-a", "--no-reset", "--no-stream", "--format", "\"table {{.ID}} \""}) stats.WaitWithDefaultTimeout() - Expect(stats).To(ExitWithError()) + Expect(stats).To(ExitWithError(125, `template: stats:1:20: executing "stats" at <.ID>: can't evaluate field ID in type *types.PodStatsReport`)) }) It("podman pod stats on net=host post", func() { diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go index 47c7335f56..d68621d3f2 100644 --- a/test/e2e/pod_stop_test.go +++ b/test/e2e/pod_stop_test.go @@ -1,10 +1,11 @@ package integration import ( + "path/filepath" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman pod stop", func() { @@ -12,7 +13,11 @@ var _ = Describe("Podman pod stop", func() { It("podman pod stop bogus pod", func() { session := podmanTest.Podman([]string{"pod", "stop", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID 123 found: no such pod" + if IsRemote() { + expect = `unable to find pod "123": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman pod stop --ignore bogus pod", func() { @@ -32,7 +37,11 @@ var _ = Describe("Podman pod stop", func() { session = podmanTest.Podman([]string{"pod", "stop", "bogus", "test1"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID bogus found: no such pod" + if IsRemote() { + expect = `unable to find pod "bogus": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman stop --ignore bogus pod and a running pod", func() { @@ -153,17 +162,20 @@ var _ = Describe("Podman pod stop", func() { session = podmanTest.Podman([]string{"pod", "stop", podid1, "doesnotexist"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + expect := "no pod with name or ID doesnotexist found: no such pod" + if IsRemote() { + expect = `unable to find pod "doesnotexist": no such pod` + } + Expect(session).Should(ExitWithError(125, expect)) }) It("podman pod start/stop single pod via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "podID" + podIDFile := filepath.Join(tempdir, "podID") podName := "rudolph" // Create a pod with --pod-id-file. 
- session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -172,26 +184,24 @@ var _ = Describe("Podman pod stop", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile}) + session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top - session = podmanTest.Podman([]string{"pod", "stop", "--pod-id-file", tmpFile}) + session = podmanTest.Podman([]string{"pod", "stop", "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) }) It("podman pod start/stop multiple pods via --pod-id-file", func() { - tmpDir := GinkgoT().TempDir() - podIDFiles := []string{} for _, i := range "0123456789" { - tmpFile := tmpDir + "cid" + string(i) + podIDFile := filepath.Join(tempdir, "cid"+string(i)) podName := "rudolph" + string(i) // Create a pod with --pod-id-file. - session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile}) + session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", podIDFile}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -202,7 +212,7 @@ var _ = Describe("Podman pod stop", func() { // Append the id files along with the command. podIDFiles = append(podIDFiles, "--pod-id-file") - podIDFiles = append(podIDFiles, tmpFile) + podIDFiles = append(podIDFiles, podIDFile) } cmd := []string{"pod", "start"} diff --git a/test/e2e/pod_top_test.go b/test/e2e/pod_top_test.go index 52b0917e5a..e104902e57 100644 --- a/test/e2e/pod_top_test.go +++ b/test/e2e/pod_top_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman top", func() { @@ -15,13 +14,17 @@ var _ = Describe("Podman top", func() { It("podman pod top without pod name or id", func() { result := podmanTest.Podman([]string{"pod", "top"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "you must provide the name or id of a running pod")) }) It("podman pod top on bogus pod", func() { result := podmanTest.Podman([]string{"pod", "top", "1234"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + expect := "no pod with name or ID 1234 found: no such pod" + if !IsRemote() { + expect = "unable to look up requested container: " + expect + } + Expect(result).Should(ExitWithError(125, expect)) }) It("podman pod top on non-running pod", func() { @@ -30,7 +33,11 @@ var _ = Describe("Podman top", func() { result := podmanTest.Podman([]string{"top", podid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + expect := fmt.Sprintf(`no container with name or ID "%s" found: no such container`, podid) + if !IsRemote() { + expect = "unable to look up requested container: " + expect + } + Expect(result).Should(ExitWithError(125, expect)) }) It("podman pod top on pod", func() { @@ -78,7 +85,7 @@ var _ = Describe("Podman top", func() { // the wrong input and still print the -ef output instead. result := podmanTest.Podman([]string{"pod", "top", podid, "-eo", "invalid"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "Error: '-eo': unknown descriptor")) }) It("podman pod top on pod with containers in same pid namespace", func() { diff --git a/test/e2e/port_test.go b/test/e2e/port_test.go index 281c7b776d..591fd9eebf 100644 --- a/test/e2e/port_test.go +++ b/test/e2e/port_test.go @@ -14,13 +14,17 @@ var _ = Describe("Podman port", func() { It("podman port all and latest", func() { result := podmanTest.Podman([]string{"port", "-a", "-l"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + if IsRemote() { + Expect(result).To(ExitWithError(125, "unknown shorthand flag: 'l' in -l")) + } else { + Expect(result).To(ExitWithError(125, "--all and --latest cannot be used together")) + } }) It("podman port all and extra", func() { result := podmanTest.Podman([]string{"port", "-a", "foobar"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "no arguments are needed with --all")) }) It("podman port -l nginx", func() { @@ -106,12 +110,12 @@ var _ = Describe("Podman port", func() { It("podman port multiple ports", func() { // Acquire and release locks - lock1 := GetPortLock("5000") + lock1 := GetPortLock("5010") defer lock1.Unlock() - lock2 := GetPortLock("5001") + lock2 := GetPortLock("5011") defer lock2.Unlock() - setup := podmanTest.Podman([]string{"run", "--name", "test", "-dt", "-p", "5000:5000", "-p", "5001:5001", ALPINE, "top"}) + setup := podmanTest.Podman([]string{"run", "--name", "test", "-dt", "-p", "5010:5000", "-p", "5011:5001", ALPINE, "top"}) setup.WaitWithDefaultTimeout() Expect(setup).Should(ExitCleanly()) @@ -119,12 +123,12 @@ var _ = Describe("Podman port", func() { result1 := podmanTest.Podman([]string{"port", "test", "5000"}) result1.WaitWithDefaultTimeout() Expect(result1).Should(ExitCleanly()) - Expect(result1.OutputToStringArray()).To(ContainElement(HavePrefix("0.0.0.0:5000"))) + Expect(result1.OutputToStringArray()).To(ContainElement(HavePrefix("0.0.0.0:5010"))) // Check 
that the second port was honored result2 := podmanTest.Podman([]string{"port", "test", "5001"}) result2.WaitWithDefaultTimeout() Expect(result2).Should(ExitCleanly()) - Expect(result2.OutputToStringArray()).To(ContainElement(HavePrefix("0.0.0.0:5001"))) + Expect(result2.OutputToStringArray()).To(ContainElement(HavePrefix("0.0.0.0:5011"))) }) }) diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go index c2f0322cd8..7943d7cfde 100644 --- a/test/e2e/prune_test.go +++ b/test/e2e/prune_test.go @@ -8,7 +8,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var pruneImage = fmt.Sprintf(` @@ -492,8 +491,7 @@ var _ = Describe("Podman prune", func() { It("podman system prune --all --external fails", func() { prune := podmanTest.Podman([]string{"system", "prune", "--all", "--external"}) prune.WaitWithDefaultTimeout() - Expect(prune).Should(Exit(125)) - Expect(prune.ErrorToString()).To(ContainSubstring("--external cannot be combined with other options")) + Expect(prune).Should(ExitWithError(125, "--external cannot be combined with other options")) }) It("podman system prune --external leaves referenced containers", func() { diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go index 67fdea900c..280f1b7af6 100644 --- a/test/e2e/ps_test.go +++ b/test/e2e/ps_test.go @@ -406,11 +406,11 @@ var _ = Describe("Podman ps", func() { It("podman ps mutually exclusive flags", func() { session := podmanTest.Podman([]string{"ps", "-aqs"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "quiet conflicts with size and namespace")) session = podmanTest.Podman([]string{"ps", "-a", "--ns", "-s"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "size and namespace options conflict")) }) It("podman --format by size", func() { @@ -540,8 +540,7 @@ var _ = Describe("Podman ps", func() { "run", "-p", "1000-2000:2000-3000", "-p", "1999-2999:3001-4001", ALPINE, }) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("conflicting port mappings for host port 1999")) + Expect(session).Should(ExitWithError(125, "conflicting port mappings for host port 1999 (protocol tcp)")) }) It("podman ps test with multiple port range", func() { @@ -666,7 +665,7 @@ var _ = Describe("Podman ps", func() { session = podmanTest.Podman([]string{"run", "--name", "test2", "--label", "foo=1", ALPINE, "ls", "/fail"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "ls: /fail: No such file or directory")) session = podmanTest.Podman([]string{"create", "--name", "test3", ALPINE, cid1}) session.WaitWithDefaultTimeout() diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go index 5a3f07f8b1..a38305acf3 100644 --- a/test/e2e/pull_test.go +++ b/test/e2e/pull_test.go @@ -21,9 +21,7 @@ var _ = Describe("Podman pull", func() { session = podmanTest.Podman([]string{"pull", "busybox:latest", "docker.io/library/ibetthisdoesnotexistfr:random", "alpine"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - expectedError := "initializing source docker://ibetthisdoesnotexistfr:random" - Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + Expect(session).Should(ExitWithError(125, "initializing source docker://ibetthisdoesnotexistfr:random: reading manifest random in 
quay.io/libpod/ibetthisdoesnotexistfr:"))
session = podmanTest.Podman([]string{"rmi", "busybox:musl", "alpine", "quay.io/libpod/cirros", "testdigest_v2s2@sha256:755f4d90b3716e2bf57060d249e2cd61c9ac089b1233465c5c2cb2d7ee550fdb"})
session.WaitWithDefaultTimeout()
@@ -31,11 +29,16 @@ var _ = Describe("Podman pull", func() {
})
It("podman pull bogus image", func() {
+ // This is a NOP in CI; but in a developer environment, if user
+ // has a valid login to quay.io, pull fails with "repository not found"
+ defer func() {
+ os.Unsetenv("REGISTRY_AUTH_FILE")
+ }()
+ os.Setenv("REGISTRY_AUTH_FILE", "/tmp/this/does/not/exist")
+
session := podmanTest.Podman([]string{"pull", "quay.io/libpod/ibetthisdoesntexist:there"})
session.WaitWithDefaultTimeout()
- Expect(session).To(ExitWithError())
- // "Not authorized", not "Not Found", because that's how registries roll??
- Expect(session.ErrorToString()).To(ContainSubstring("unauthorized: access to the requested resource is not authorized"))
+ Expect(session).To(ExitWithError(125, "nitializing source docker://quay.io/libpod/ibetthisdoesntexist:there: reading manifest there in quay.io/libpod/ibetthisdoesntexist: unauthorized: access to the requested resource is not authorized"))
})
It("podman pull with tag --quiet", func() {
@@ -141,7 +144,7 @@ var _ = Describe("Podman pull", func() {
// Without a tag/digest the input is normalized with the "latest" tag, see #11964
session = podmanTest.Podman([]string{"rmi", "testdigest_v2s2"})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(1))
+ Expect(session).Should(ExitWithError(1, "testdigest_v2s2: image not known"))
session = podmanTest.Podman([]string{"rmi", "testdigest_v2s2@sha256:755f4d90b3716e2bf57060d249e2cd61c9ac089b1233465c5c2cb2d7ee550fdb"})
session.WaitWithDefaultTimeout()
@@ -171,8 +174,7 @@ var _ = Describe("Podman pull", func() {
It("podman pull from docker with nonexistent --authfile", func() {
session := podmanTest.Podman([]string{"pull", "-q", "--authfile", "/tmp/nonexistent", ALPINE})
session.WaitWithDefaultTimeout()
- Expect(session).To(ExitWithError())
- Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"))
+ Expect(session).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory"))
})
It("podman pull by digest (image list)", func() {
@@ -222,14 +224,16 @@ var _ = Describe("Podman pull", func() {
session := podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", ALPINEARM64DIGEST})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
+ // inspect using the digest of the list
session = podmanTest.Podman([]string{"inspect", "--format", "{{.RepoTags}}", ALPINELISTDIGEST})
session.WaitWithDefaultTimeout()
- Expect(session).To(ExitWithError())
+ Expect(session).To(ExitWithError(125, fmt.Sprintf(`no such object: "%s"`, ALPINELISTDIGEST)))
// inspect using the digest of the list
session = podmanTest.Podman([]string{"inspect", "--format", "{{.RepoDigests}}", ALPINELISTDIGEST})
session.WaitWithDefaultTimeout()
- Expect(session).To(ExitWithError())
+ Expect(session).To(ExitWithError(125, fmt.Sprintf(`no such object: "%s"`, ALPINELISTDIGEST)))
+ // inspect using the digest of the arch-specific image's manifest
session = podmanTest.Podman([]string{"inspect", "--format", "{{.RepoTags}}", ALPINEARM64DIGEST})
session.WaitWithDefaultTimeout()
@@ -336,9 +340,7 @@ var _ = Describe("Podman pull", func() {
// image.
session = podmanTest.Podman([]string{"pull", "-q", "docker-archive:./testdata/docker-two-images.tar.xz"})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError := "Unexpected tar manifest.json: expected 1 item, got 2"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, "Unexpected tar manifest.json: expected 1 item, got 2"))
// Now pull _one_ image from a multi-image archive via the name
// and index syntax.
@@ -361,15 +363,11 @@ var _ = Describe("Podman pull", func() {
// Now check for some errors.
session = podmanTest.Podman([]string{"pull", "-q", "docker-archive:./testdata/docker-two-images.tar.xz:foo.com/does/not/exist:latest"})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError = "Tag \"foo.com/does/not/exist:latest\" not found"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, `Tag "foo.com/does/not/exist:latest" not found`))
session = podmanTest.Podman([]string{"pull", "-q", "docker-archive:./testdata/docker-two-images.tar.xz:@2"})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError = "Invalid source index @2, only 2 manifest items available"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, "Invalid source index @2, only 2 manifest items available"))
})
It("podman pull from oci-archive", func() {
@@ -415,7 +413,7 @@ var _ = Describe("Podman pull", func() {
// Note that reference is not preserved in dir.
session = podmanTest.Podman([]string{"image", "exists", "cirros"})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(1))
+ Expect(session).Should(ExitWithError(1, ""))
})
It("podman pull from local OCI directory", func() {
@@ -544,15 +542,11 @@ var _ = Describe("Podman pull", func() {
It("podman pull --platform", func() {
session := podmanTest.Podman([]string{"pull", "-q", "--platform=linux/bogus", ALPINE})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError := "no image found in manifest list for architecture bogus"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`))
session = podmanTest.Podman([]string{"pull", "-q", "--platform=linux/arm64", "--os", "windows", ALPINE})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError = "--platform option can not be specified with --arch or --os"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, "--platform option can not be specified with --arch or --os"))
session = podmanTest.Podman([]string{"pull", "-q", "--platform=linux/arm64", ALPINE})
session.WaitWithDefaultTimeout()
@@ -571,15 +565,11 @@ var _ = Describe("Podman pull", func() {
It("podman pull --arch", func() {
session := podmanTest.Podman([]string{"pull", "-q", "--arch=bogus", ALPINE})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError := "no image found in manifest list for architecture bogus"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`))
session = podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", "--os", "windows", ALPINE})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
- expectedError = "no image found in manifest list for architecture"
- Expect(session.ErrorToString()).To(ContainSubstring(expectedError))
+ Expect(session).Should(ExitWithError(125, "no image found in manifest list for architecture"))
session = podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", ALPINE})
session.WaitWithDefaultTimeout()
@@ -610,7 +600,7 @@ var _ = Describe("Podman pull", func() {
Describe("podman pull and decrypt", func() {
- decryptionTestHelper := func(imgPath string) *PodmanSessionIntegration {
+ decryptionTestHelper := func(imgPath string, expectedError1 string) *PodmanSessionIntegration {
bitSize := 1024
keyFileName := filepath.Join(podmanTest.TempDir, "key,withcomma")
publicKeyFileName, privateKeyFileName, err := WriteRSAKeyPair(keyFileName, bitSize)
@@ -630,15 +620,15 @@ var _ = Describe("Podman pull", func() {
// Pulling encrypted image without key should fail
session = podmanTest.Podman([]string{"pull", imgPath})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
+ Expect(session).Should(ExitWithError(125, expectedError1))
// Pulling encrypted image with wrong key should fail
- session = podmanTest.Podman([]string{"pull", "-q", "--decryption-key", wrongPrivateKeyFileName, imgPath})
+ session = podmanTest.Podman([]string{"pull", "-q", "--decryption-key", wrongPrivateKeyFileName, "--tls-verify=false", imgPath})
session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(125))
+ Expect(session).Should(ExitWithError(125, "no suitable key unwrapper found or none of the private keys could be used for decryption"))
// Pulling encrypted image with correct key should pass
- session = podmanTest.Podman([]string{"pull", "-q", "--decryption-key", privateKeyFileName, imgPath})
+ session = podmanTest.Podman([]string{"pull", "-q", "--decryption-key", privateKeyFileName, "--tls-verify=false", imgPath})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
session = podmanTest.Podman([]string{"images"})
@@ -657,7 +647,7 @@ var _ = Describe("Podman pull", func() {
imgName := "localhost/name:tag"
imgPath := fmt.Sprintf("oci:%s:%s", bbdir, imgName)
- session := decryptionTestHelper(imgPath)
+ session := decryptionTestHelper(imgPath, "invalid tar header")
Expect(session.LineInOutputContainsTag("localhost/name", "tag")).To(BeTrue())
})
@@ -675,9 +665,9 @@ var _ = Describe("Podman pull", func() {
err := podmanTest.RestoreArtifact(REGISTRY_IMAGE)
Expect(err).ToNot(HaveOccurred())
}
- lock := GetPortLock("5000")
+ lock := GetPortLock("5012")
defer lock.Unlock()
- session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5012:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
@@ -685,9 +675,9 @@ var _ = Describe("Podman pull", func() {
Skip("Cannot start docker registry.")
}
- imgPath := "localhost:5000/my-alpine"
+ imgPath := "localhost:5012/my-alpine"
- session = decryptionTestHelper(imgPath)
+ session = decryptionTestHelper(imgPath, `initializing source docker://localhost:5012/my-alpine:latest: pinging container registry localhost:5012: Get "https://localhost:5012/v2/": http: server gave HTTP response to HTTPS client`)
Expect(session.LineInOutputContainsTag(imgPath, "latest")).To(BeTrue())
})
diff --git a/test/e2e/push_test.go
b/test/e2e/push_test.go index 3da5536003..0489c4f091 100644 --- a/test/e2e/push_test.go +++ b/test/e2e/push_test.go @@ -53,9 +53,7 @@ var _ = Describe("Podman push", func() { // Invalid compression format specified, it must fail session := podmanTest.Podman([]string{"push", "-q", "--compression-format=gzip", "--compression-level=40", ALPINE, fmt.Sprintf("oci:%s", bbdir)}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - output := session.ErrorToString() - Expect(output).To(ContainSubstring("invalid compression level")) + Expect(session).Should(ExitWithError(125, "writing blob: happened during read: gzip: invalid compression level: 40")) session = podmanTest.Podman([]string{"push", "-q", "--compression-format=zstd", "--remove-signatures", ALPINE, fmt.Sprintf("oci:%s", bbdir)}) @@ -92,9 +90,9 @@ var _ = Describe("Podman push", func() { err := podmanTest.RestoreArtifact(REGISTRY_IMAGE) Expect(err).ToNot(HaveOccurred()) } - lock := GetPortLock("5000") + lock := GetPortLock("5002") defer lock.Unlock() - session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) + session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5002:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -106,19 +104,18 @@ var _ = Describe("Podman push", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "imageone", "localhost:5000/image"}) + push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--force-compression=true", "--compression-format", "gzip", "--remove-signatures", "imageone", "localhost:5002/image"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - skopeoInspect := []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5000/image:latest"} + skopeoInspect := []string{"inspect", "--tls-verify=false", "--raw", "docker://localhost:5002/image:latest"} skopeo := SystemExec("skopeo", skopeoInspect) skopeo.WaitWithDefaultTimeout() Expect(skopeo).Should(ExitCleanly()) output := skopeo.OutputToString() - // Default compression is gzip and push with `--force-compression=false` no traces of `zstd` should be there. Expect(output).ToNot(ContainSubstring("zstd")) - push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--force-compression=false", "--compression-format", "zstd", "--remove-signatures", "imageone", "localhost:5000/image"}) + push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--force-compression=false", "--compression-format", "zstd", "--remove-signatures", "imageone", "localhost:5002/image"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) @@ -130,7 +127,7 @@ var _ = Describe("Podman push", func() { // since blobs must be reused from last `gzip` image. 
Expect(output).ToNot(ContainSubstring("zstd")) - push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--compression-format", "zstd", "--force-compression", "--remove-signatures", "imageone", "localhost:5000/image"}) + push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--compression-format", "zstd", "--force-compression", "--remove-signatures", "imageone", "localhost:5002/image"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) @@ -150,9 +147,9 @@ var _ = Describe("Podman push", func() { err := podmanTest.RestoreArtifact(REGISTRY_IMAGE) Expect(err).ToNot(HaveOccurred()) } - lock := GetPortLock("5000") + lock := GetPortLock("5003") defer lock.Unlock() - session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) + session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5003:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -160,11 +157,11 @@ var _ = Describe("Podman push", func() { Skip("Cannot start docker registry.") } - push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5003/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - push = podmanTest.Podman([]string{"push", "--compression-format=gzip", "--compression-level=1", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push = podmanTest.Podman([]string{"push", "--compression-format=gzip", "--compression-level=1", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5003/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) output := push.ErrorToString() @@ -178,14 +175,14 @@ var _ = Describe("Podman push", func() { Expect(err).ToNot(HaveOccurred()) if !IsRemote() { // Remote does not support --encryption-key - push = podmanTest.Podman([]string{"push", "-q", "--encryption-key", "jwe:" + publicKeyFileName, "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push = podmanTest.Podman([]string{"push", "-q", "--encryption-key", "jwe:" + publicKeyFileName, "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5003/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) } // Test --digestfile option digestFile := filepath.Join(podmanTest.TempDir, "digestfile.txt") - push2 := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--digestfile=" + digestFile, "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push2 := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--digestfile=" + digestFile, "--remove-signatures", ALPINE, "localhost:5003/my-alpine"}) push2.WaitWithDefaultTimeout() fi, err := os.Lstat(digestFile) Expect(err).ToNot(HaveOccurred()) @@ -208,45 +205,43 @@ var _ = Describe("Podman push", func() { Expect(err).ToNot(HaveOccurred()) }() // Generate a signature verification policy file - policyPath := generatePolicyFile(podmanTest.TempDir) + policyPath := generatePolicyFile(podmanTest.TempDir, 5003) defer os.Remove(policyPath) // Verify that the policy rejects unsigned images - push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, 
"localhost:5000/sigstore-signed"}) + push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5003/sigstore-signed"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - pull := podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed"}) + pull := podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5003/sigstore-signed"}) pull.WaitWithDefaultTimeout() - Expect(pull).To(ExitWithError()) - Expect(pull.ErrorToString()).To(ContainSubstring("A signature was required, but no signature exists")) + Expect(pull).To(ExitWithError(125, "A signature was required, but no signature exists")) // Sign an image, and verify it is accepted. - push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "--sign-by-sigstore-private-key", "testdata/sigstore-key.key", "--sign-passphrase-file", "testdata/sigstore-key.key.pass", ALPINE, "localhost:5000/sigstore-signed"}) + push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "--sign-by-sigstore-private-key", "testdata/sigstore-key.key", "--sign-passphrase-file", "testdata/sigstore-key.key.pass", ALPINE, "localhost:5003/sigstore-signed"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed"}) + pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5003/sigstore-signed"}) pull.WaitWithDefaultTimeout() Expect(pull).Should(ExitCleanly()) By("pushing and pulling with --sign-by-sigstore") // Verify that the policy rejects unsigned images - push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/sigstore-signed-params"}) + push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5003/sigstore-signed-params"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed-params"}) + pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5003/sigstore-signed-params"}) pull.WaitWithDefaultTimeout() - Expect(pull).To(ExitWithError()) - Expect(pull.ErrorToString()).To(ContainSubstring("A signature was required, but no signature exists")) + Expect(pull).To(ExitWithError(125, "A signature was required, but no signature exists")) // Sign an image, and verify it is accepted. 
- push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "--sign-by-sigstore", "testdata/sigstore-signing-params.yaml", ALPINE, "localhost:5000/sigstore-signed-params"}) + push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "--sign-by-sigstore", "testdata/sigstore-signing-params.yaml", ALPINE, "localhost:5003/sigstore-signed-params"}) push.WaitWithDefaultTimeout() Expect(push).Should(ExitCleanly()) - pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed-params"}) + pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5003/sigstore-signed-params"}) pull.WaitWithDefaultTimeout() Expect(pull).Should(ExitCleanly()) } @@ -276,14 +271,14 @@ var _ = Describe("Podman push", func() { authPath := filepath.Join(podmanTest.TempDir, "auth") err = os.Mkdir(authPath, os.ModePerm) Expect(err).ToNot(HaveOccurred()) - err = os.MkdirAll("/etc/containers/certs.d/localhost:5000", os.ModePerm) + err = os.MkdirAll("/etc/containers/certs.d/localhost:5004", os.ModePerm) Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll("/etc/containers/certs.d/localhost:5000") + defer os.RemoveAll("/etc/containers/certs.d/localhost:5004") cwd, _ := os.Getwd() certPath := filepath.Join(cwd, "../", "certs") - lock := GetPortLock("5000") + lock := GetPortLock("5004") defer lock.Unlock() htpasswd := SystemExec("htpasswd", []string{"-Bbn", "podmantest", "test"}) htpasswd.WaitWithDefaultTimeout() @@ -298,7 +293,7 @@ var _ = Describe("Podman push", func() { err = f.Sync() Expect(err).ToNot(HaveOccurred()) - session := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry", "-v", + session := podmanTest.Podman([]string{"run", "-d", "-p", "5004:5000", "--name", "registry", "-v", strings.Join([]string{authPath, "/auth", "z"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e", "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm", "-e", "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd", "-v", strings.Join([]string{certPath, "/certs", "z"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt", @@ -308,43 +303,40 @@ var _ = Describe("Podman push", func() { Expect(WaitContainerReady(podmanTest, "registry", "listening on", 20, 1)).To(BeTrue(), "registry container ready") - push := podmanTest.Podman([]string{"push", "--tls-verify=true", "--format=v2s2", "--creds=podmantest:test", ALPINE, "localhost:5000/tlstest"}) + push := podmanTest.Podman([]string{"push", "--tls-verify=true", "--format=v2s2", "--creds=podmantest:test", ALPINE, "localhost:5004/tlstest"}) push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) - Expect(push.ErrorToString()).To(ContainSubstring("x509: certificate signed by unknown authority")) + Expect(push).To(ExitWithError(125, "x509: certificate signed by unknown authority")) - push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--tls-verify=false", ALPINE, "localhost:5000/tlstest"}) + push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--tls-verify=false", ALPINE, "localhost:5004/tlstest"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) Expect(push.ErrorToString()).To(ContainSubstring("Writing manifest to image destination")) - setup := SystemExec("cp", []string{filepath.Join(certPath, "domain.crt"), "/etc/containers/certs.d/localhost:5000/ca.crt"}) + setup := SystemExec("cp", []string{filepath.Join(certPath, "domain.crt"), 
"/etc/containers/certs.d/localhost:5004/ca.crt"}) Expect(setup).Should(ExitCleanly()) - push = podmanTest.Podman([]string{"push", "--creds=podmantest:wrongpasswd", ALPINE, "localhost:5000/credstest"}) + push = podmanTest.Podman([]string{"push", "--creds=podmantest:wrongpasswd", ALPINE, "localhost:5004/credstest"}) push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) - Expect(push.ErrorToString()).To(ContainSubstring("/credstest: authentication required")) + Expect(push).To(ExitWithError(125, "/credstest: authentication required")) if !IsRemote() { // remote does not support --cert-dir - push = podmanTest.Podman([]string{"push", "--tls-verify=true", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5000/certdirtest"}) + push = podmanTest.Podman([]string{"push", "--tls-verify=true", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5004/certdirtest"}) push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) - Expect(push.ErrorToString()).To(ContainSubstring("x509: certificate signed by unknown authority")) + Expect(push).To(ExitWithError(125, "x509: certificate signed by unknown authority")) } - push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5000/defaultflags"}) + push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5004/defaultflags"}) push.WaitWithDefaultTimeout() Expect(push).Should(Exit(0)) Expect(push.ErrorToString()).To(ContainSubstring("Writing manifest to image destination")) // create and push manifest - session = podmanTest.Podman([]string{"manifest", "create", "localhost:5000/manifesttest"}) + session = podmanTest.Podman([]string{"manifest", "create", "localhost:5004/manifesttest"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"manifest", "push", "--creds=podmantest:test", "--tls-verify=false", "--all", "localhost:5000/manifesttest"}) + session = podmanTest.Podman([]string{"manifest", "push", "--creds=podmantest:test", "--tls-verify=false", "--all", "localhost:5004/manifesttest"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).To(ContainSubstring("Writing manifest list to image destination")) diff --git a/test/e2e/quadlet/annotation.build b/test/e2e/quadlet/annotation.build new file mode 100644 index 0000000000..6f0fd9281a --- /dev/null +++ b/test/e2e/quadlet/annotation.build @@ -0,0 +1,14 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args "--annotation" "org.foo.Arg0=arg0" +## assert-podman-args "--annotation" "org.foo.Arg1=arg1" +## assert-podman-args "--annotation" "org.foo.Arg2=arg 2" +## assert-podman-args "--annotation" "org.foo.Arg3=arg3" +## assert-podman-args --tag=localhost/imagename + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Annotation=org.foo.Arg1=arg1 "org.foo.Arg2=arg 2" \ + org.foo.Arg3=arg3 + +Annotation=org.foo.Arg0=arg0 diff --git a/test/e2e/quadlet/arch.build b/test/e2e/quadlet/arch.build new file mode 100644 index 0000000000..38c597db89 --- /dev/null +++ b/test/e2e/quadlet/arch.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --arch=aarch64 + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Arch=aarch64 diff --git a/test/e2e/quadlet/authfile.build b/test/e2e/quadlet/authfile.build new file mode 100644 index 
0000000000..42d74118d8 --- /dev/null +++ b/test/e2e/quadlet/authfile.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --authfile=/etc/certs/auth.json + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +AuthFile=/etc/certs/auth.json diff --git a/test/e2e/quadlet/basic.build b/test/e2e/quadlet/basic.build new file mode 100644 index 0000000000..878d2787c8 --- /dev/null +++ b/test/e2e/quadlet/basic.build @@ -0,0 +1,13 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-key-is "Unit" "After" "network-online.target" +## assert-key-is "Unit" "Wants" "network-online.target" +## assert-key-is "Unit" "RequiresMountsFor" "%t/containers" +## assert-key-is-regex "Service" "WorkingDirectory" "/.*/podman-e2e-.*/subtest-.*/quadlet" +## assert-key-is "Service" "Type" "oneshot" +## assert-key-is "Service" "RemainAfterExit" "yes" +## assert-key-is "Service" "SyslogIdentifier" "%N" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit diff --git a/test/e2e/quadlet/basic.container b/test/e2e/quadlet/basic.container index 3a3a98f695..3d507c884a 100644 --- a/test/e2e/quadlet/basic.container +++ b/test/e2e/quadlet/basic.container @@ -15,6 +15,8 @@ ## assert-key-is-regex "Service" "ExecStopPost" "-[/S].*/podman rm -v -f -i --cidfile=%t/%N.cid" ## assert-key-is-regex "Service" "ExecStop" ".*/podman rm -v -f -i --cidfile=%t/%N.cid" ## assert-key-is "Service" "Environment" "PODMAN_SYSTEMD_UNIT=%n" +## assert-key-is "Unit" "After" "network-online.target" +## assert-key-is "Unit" "Wants" "network-online.target" [Container] Image=localhost/imagename diff --git a/test/e2e/quadlet/basic.image b/test/e2e/quadlet/basic.image index b0502a7a57..9a30765e52 100644 --- a/test/e2e/quadlet/basic.image +++ b/test/e2e/quadlet/basic.image @@ -1,4 +1,6 @@ ## assert-podman-final-args localhost/imagename +## assert-key-is "Unit" "After" "network-online.target" +## assert-key-is "Unit" "Wants" "network-online.target" ## assert-key-is "Unit" "RequiresMountsFor" "%t/containers" ## assert-key-is "Service" "Type" "oneshot" ## assert-key-is "Service" "RemainAfterExit" "yes" diff --git a/test/e2e/quadlet/basic.kube b/test/e2e/quadlet/basic.kube index 1f2bc16dc6..0c5bb76182 100644 --- a/test/e2e/quadlet/basic.kube +++ b/test/e2e/quadlet/basic.kube @@ -1,11 +1,11 @@ ## assert-podman-args "kube" ## assert-podman-args "play" -## assert-podman-final-args-regex .*/podman_test.*/quadlet/deployment.yml +## assert-podman-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml ## assert-podman-args "--replace" ## assert-podman-args "--service-container=true" ## assert-podman-stop-post-args "kube" ## assert-podman-stop-post-args "down" -## assert-podman-stop-post-final-args-regex .*/podman_test.*/quadlet/deployment.yml +## assert-podman-stop-post-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml ## assert-key-is "Unit" "RequiresMountsFor" "%t/containers" ## assert-key-is "Service" "KillMode" "mixed" ## assert-key-is "Service" "Type" "notify" diff --git a/test/e2e/quadlet/build-not-found.quadlet.volume b/test/e2e/quadlet/build-not-found.quadlet.volume new file mode 100644 index 0000000000..fdbd6d6f04 --- /dev/null +++ b/test/e2e/quadlet/build-not-found.quadlet.volume @@ -0,0 +1,6 @@ +## assert-failed +## assert-stderr-contains "requested Quadlet image not-found.build was not found" + +[Volume] 
+Driver=image +Image=not-found.build diff --git a/test/e2e/quadlet/build.quadlet.volume b/test/e2e/quadlet/build.quadlet.volume new file mode 100644 index 0000000000..177e98981e --- /dev/null +++ b/test/e2e/quadlet/build.quadlet.volume @@ -0,0 +1,8 @@ +## assert-podman-args --driver=image +## assert-podman-args --opt image=localhost/imagename +## assert-key-is "Unit" "Requires" "basic-build.service" +## assert-key-is "Unit" "After" "basic-build.service" + +[Volume] +Driver=image +Image=basic.build diff --git a/test/e2e/quadlet/configmap.kube b/test/e2e/quadlet/configmap.kube index 6e09fcd8c6..49c42b7e13 100644 --- a/test/e2e/quadlet/configmap.kube +++ b/test/e2e/quadlet/configmap.kube @@ -1,5 +1,5 @@ ## assert-podman-args "--configmap" "/opt/k8s/abs.yml" -## assert-podman-args-regex "--configmap" ".*/podman_test.*/quadlet/rel.yml" +## assert-podman-args-regex "--configmap" ".*/podman-e2e-.*/subtest-.*/quadlet/rel.yml" [Kube] Yaml=deployment.yml diff --git a/test/e2e/quadlet/containersconfmodule.build b/test/e2e/quadlet/containersconfmodule.build new file mode 100644 index 0000000000..ac929ccd00 --- /dev/null +++ b/test/e2e/quadlet/containersconfmodule.build @@ -0,0 +1,8 @@ +## assert-podman-global-args "build" "--module=/etc/container/1.conf" +## assert-podman-global-args "build" "--module=/etc/container/2.conf" + +[Build] +ImageTag=image:latest +SetWorkingDirectory=unit +ContainersConfModule=/etc/container/1.conf +ContainersConfModule=/etc/container/2.conf diff --git a/test/e2e/quadlet/dns-options.build b/test/e2e/quadlet/dns-options.build new file mode 100644 index 0000000000..5bdfb202df --- /dev/null +++ b/test/e2e/quadlet/dns-options.build @@ -0,0 +1,10 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args "--dns-option=ndots:1" +## assert-podman-args "--dns-option=color:blue" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +DNSOption=ndots:1 +DNSOption=color:blue diff --git a/test/e2e/quadlet/dns-search.build b/test/e2e/quadlet/dns-search.build new file mode 100644 index 0000000000..f2340bacd0 --- /dev/null +++ b/test/e2e/quadlet/dns-search.build @@ -0,0 +1,10 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args "--dns-search=foo.com" +## assert-podman-args "--dns-search=bar.com" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +DNSSearch=foo.com +DNSSearch=bar.com diff --git a/test/e2e/quadlet/dns.build b/test/e2e/quadlet/dns.build new file mode 100644 index 0000000000..aaf96ce171 --- /dev/null +++ b/test/e2e/quadlet/dns.build @@ -0,0 +1,10 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args "--dns=8.7.7.7" +## assert-podman-args "--dns=8.8.8.8" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +DNS=8.7.7.7 +DNS=8.8.8.8 diff --git a/test/e2e/quadlet/downforce.kube b/test/e2e/quadlet/downforce.kube index 19f5fd1b85..081971e8f2 100644 --- a/test/e2e/quadlet/downforce.kube +++ b/test/e2e/quadlet/downforce.kube @@ -1,7 +1,7 @@ ## assert-podman-stop-post-args "kube" ## assert-podman-stop-post-args "down" ## assert-podman-stop-post-args "--force" -## assert-podman-stop-post-final-args-regex .*/podman_test.*/quadlet/deployment.yml +## assert-podman-stop-post-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml [Kube] Yaml=deployment.yml diff 
--git a/test/e2e/quadlet/env-file.container b/test/e2e/quadlet/env-file.container index 47bdf7cae4..ebb9549889 100644 --- a/test/e2e/quadlet/env-file.container +++ b/test/e2e/quadlet/env-file.container @@ -1,7 +1,7 @@ ## assert-podman-final-args localhost/imagename ## assert-podman-args --env-file /opt/env/abs-1 ## assert-podman-args --env-file /opt/env/abs-2 -## assert-podman-args-regex --env-file /.*/podman_test.*/quadlet/rel-1 +## assert-podman-args-regex --env-file /.*/podman-e2e-.*/subtest-.*/quadlet/rel-1 ## assert-podman-args --env-file %h/env [Container] diff --git a/test/e2e/quadlet/env.build b/test/e2e/quadlet/env.build new file mode 100644 index 0000000000..55a09b2397 --- /dev/null +++ b/test/e2e/quadlet/env.build @@ -0,0 +1,14 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --env "FOO1=foo1" +## assert-podman-args --env "FOO2=foo2 " +## assert-podman-args --env "FOO3=foo3" +## assert-podman-args --env "REPLACE=replaced" +## assert-podman-args --env "FOO4=foo\\nfoo" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Environment=FOO1=foo1 "FOO2=foo2 " \ + FOO3=foo3 REPLACE=replace +Environment=REPLACE=replaced 'FOO4=foo\nfoo' diff --git a/test/e2e/quadlet/file-abs.build b/test/e2e/quadlet/file-abs.build new file mode 100644 index 0000000000..89c2877a6a --- /dev/null +++ b/test/e2e/quadlet/file-abs.build @@ -0,0 +1,5 @@ +## assert-podman-final-args --file=/etc/containers/systemd/Containerfile + +[Build] +File=/etc/containers/systemd/Containerfile +ImageTag=localhost/imagename diff --git a/test/e2e/quadlet/file-https.build b/test/e2e/quadlet/file-https.build new file mode 100644 index 0000000000..9cb2971561 --- /dev/null +++ b/test/e2e/quadlet/file-https.build @@ -0,0 +1,6 @@ +## assert-podman-args --tag=localhost/podman-hello +## assert-podman-args --file=https://raw.githubusercontent.com/containers/PodmanHello/main/Containerfile + +[Build] +File=https://raw.githubusercontent.com/containers/PodmanHello/main/Containerfile +ImageTag=localhost/podman-hello diff --git a/test/e2e/quadlet/file-rel-no-wd.build b/test/e2e/quadlet/file-rel-no-wd.build new file mode 100644 index 0000000000..a1776d51bf --- /dev/null +++ b/test/e2e/quadlet/file-rel-no-wd.build @@ -0,0 +1,6 @@ +## assert-failed +## assert-stderr-contains "relative path in File key requires SetWorkingDirectory key to be set" + +[Build] +ImageTag=localhost/imagename +File=Containerfile diff --git a/test/e2e/quadlet/file-rel.build b/test/e2e/quadlet/file-rel.build new file mode 100644 index 0000000000..3cc60e500e --- /dev/null +++ b/test/e2e/quadlet/file-rel.build @@ -0,0 +1,7 @@ +## assert-podman-final-args . +## assert-podman-args-regex "--file=Containerfile" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=. 
+File=Containerfile diff --git a/test/e2e/quadlet/force-rm.build b/test/e2e/quadlet/force-rm.build new file mode 100644 index 0000000000..7a17e60f26 --- /dev/null +++ b/test/e2e/quadlet/force-rm.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --force-rm=false + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +ForceRM=no diff --git a/test/e2e/quadlet/globalargs.build b/test/e2e/quadlet/globalargs.build new file mode 100644 index 0000000000..01c13bed2c --- /dev/null +++ b/test/e2e/quadlet/globalargs.build @@ -0,0 +1,9 @@ +## assert-podman-global-args "build" "--identity=path=/etc/identity" +## assert-podman-global-args "build" "--syslog" +## assert-podman-global-args "build" "--log-level=debug" + +[Build] +ImageTag=image:latest +SetWorkingDirectory=unit +GlobalArgs=--identity=path=/etc/identity +GlobalArgs=--syslog --log-level=debug diff --git a/test/e2e/quadlet/group-add.build b/test/e2e/quadlet/group-add.build new file mode 100644 index 0000000000..f2a54a50f1 --- /dev/null +++ b/test/e2e/quadlet/group-add.build @@ -0,0 +1,8 @@ +## assert-podman-args "--group-add=keep-groups" +## assert-podman-args "--group-add=users" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +GroupAdd=keep-groups +GroupAdd=users diff --git a/test/e2e/quadlet/group-add.container b/test/e2e/quadlet/group-add.container new file mode 100644 index 0000000000..99800604cf --- /dev/null +++ b/test/e2e/quadlet/group-add.container @@ -0,0 +1,7 @@ +## assert-podman-args "--group-add=keep-groups" +## assert-podman-args "--group-add=users" + +[Container] +Image=localhost/imagename +GroupAdd=keep-groups +GroupAdd=users diff --git a/test/e2e/quadlet/image-not-found.quadlet.volume b/test/e2e/quadlet/image-not-found.quadlet.volume new file mode 100644 index 0000000000..bbb9851a2d --- /dev/null +++ b/test/e2e/quadlet/image-not-found.quadlet.volume @@ -0,0 +1,6 @@ +## assert-failed +## assert-stderr-contains "requested Quadlet image not-found.image was not found" + +[Volume] +Driver=image +Image=not-found.image diff --git a/test/e2e/quadlet/image.quadlet.volume b/test/e2e/quadlet/image.quadlet.volume new file mode 100644 index 0000000000..8a64b00768 --- /dev/null +++ b/test/e2e/quadlet/image.quadlet.volume @@ -0,0 +1,8 @@ +## assert-podman-args --driver=image +## assert-podman-args --opt image=localhost/imagename +## assert-key-is "Unit" "Requires" "basic-image.service" +## assert-key-is "Unit" "After" "basic-image.service" + +[Volume] +Driver=image +Image=basic.image diff --git a/test/e2e/quadlet/label.build b/test/e2e/quadlet/label.build new file mode 100644 index 0000000000..688c2b2455 --- /dev/null +++ b/test/e2e/quadlet/label.build @@ -0,0 +1,14 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args "--label" "org.foo.Arg0=arg0" +## assert-podman-args "--label" "org.foo.Arg1=arg1" +## assert-podman-args "--label" "org.foo.Arg2=arg 2" +## assert-podman-args "--label" "org.foo.Arg3=arg3" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Label=org.foo.Arg1=arg1 "org.foo.Arg2=arg 2" \ + org.foo.Arg3=arg3 + +Label=org.foo.Arg0=arg0 diff --git a/test/e2e/quadlet/logopt.container b/test/e2e/quadlet/logopt.container new file mode 100644 index 0000000000..07b0936ea8 --- /dev/null +++ b/test/e2e/quadlet/logopt.container @@ -0,0 +1,9 @@ +## assert-podman-args "--log-opt" 
"path=/var/log/some-logs.json" +## assert-podman-args "--log-opt" "size=10mb" +## assert-podman-args "--log-opt" "tag="{{.ImageName}}"" + +[Container] +Image=localhost/imagename +LogOpt=path=/var/log/some-logs.json +LogOpt=size=10mb +LogOpt=tag="{{.ImageName}}" diff --git a/test/e2e/quadlet/logopt.kube b/test/e2e/quadlet/logopt.kube new file mode 100644 index 0000000000..af5d700f56 --- /dev/null +++ b/test/e2e/quadlet/logopt.kube @@ -0,0 +1,9 @@ +## assert-podman-args "--log-opt" "path=/var/log/some-logs.json" +## assert-podman-args "--log-opt" "size=10mb" +## assert-podman-args "--log-opt" "tag="{{.ImageName}}"" + +[Kube] +Yaml=deployment.yml +LogOpt=path=/var/log/some-logs.json +LogOpt=size=10mb +LogOpt=tag="{{.ImageName}}" diff --git a/test/e2e/quadlet/mount.container b/test/e2e/quadlet/mount.container index 5e16aebc99..9969571bb1 100644 --- a/test/e2e/quadlet/mount.container +++ b/test/e2e/quadlet/mount.container @@ -10,7 +10,7 @@ Mount=type=bind,src=/path/on/host,dst=/path/in/container,relabel=shared,U=true Mount=type=volume,source=vol1,destination=/path/in/container,ro=true ## assert-podman-args-key-val "--mount" "," "type=volume,source=systemd-vol2,destination=/path/in/container,ro=true" ## assert-key-is "Unit" "Requires" "vol2-volume.service" -## assert-key-is "Unit" "After" "vol2-volume.service" +## assert-key-is "Unit" "After" "network-online.target" "vol2-volume.service" Mount=type=volume,source=vol2.volume,destination=/path/in/container,ro=true ## assert-podman-args-key-val "--mount" "," "type=tmpfs,tmpfs-size=512M,destination=/path/in/container" Mount=type=tmpfs,tmpfs-size=512M,destination=/path/in/container @@ -18,11 +18,11 @@ Mount=type=tmpfs,tmpfs-size=512M,destination=/path/in/container Mount=type=image,source=fedora,destination=/fedora-image,rw=true ## assert-podman-args-key-val "--mount" "," "type=devpts,destination=/dev/pts" Mount=type=devpts,destination=/dev/pts -## assert-podman-args-key-val-regex "--mount" "," "type=bind,source=.*/podman_test.*/quadlet/path/on/host,destination=/path/in/container" +## assert-podman-args-key-val-regex "--mount" "," "type=bind,source=.*/podman-e2e-.*/subtest-.*/quadlet/path/on/host,destination=/path/in/container" Mount=type=bind,source=./path/on/host,destination=/path/in/container ## assert-podman-args-key-val "--mount" "," "type=volume,source=vol1,destination=/path/in/container,ro" Mount=type=volume,source=vol1,destination=/path/in/container,ro ## assert-podman-args-key-val "--mount" "," "type=bind,source=/tmp,\"dst=/path,1\"" Mount=type=bind,src=/tmp,\"dst=/path,1\" -## assert-podman-args-key-val-regex "--mount" "," "type=bind,source=.*/podman_test.*/quadlet/src,destination=/dst/,idmap=uids=12-34-1;gids=56-78-1" +## assert-podman-args-key-val-regex "--mount" "," "type=bind,source=.*/podman-e2e-.*/subtest-.*/quadlet/src,destination=/dst/,idmap=uids=12-34-1;gids=56-78-1" Mount=type=bind,source=./src/,destination=/dst/,idmap=uids=12-34-1;gids=56-78-1 diff --git a/test/e2e/quadlet/neither-workingdirectory-nor-file.build b/test/e2e/quadlet/neither-workingdirectory-nor-file.build new file mode 100644 index 0000000000..4dab5e7f31 --- /dev/null +++ b/test/e2e/quadlet/neither-workingdirectory-nor-file.build @@ -0,0 +1,5 @@ +## assert-failed +## assert-stderr-contains "neither SetWorkingDirectory, nor File key specified" + +[Build] +ImageTag=localhost/imagename diff --git a/test/e2e/quadlet/network.build b/test/e2e/quadlet/network.build new file mode 100644 index 0000000000..cc9fddd26c --- /dev/null +++ b/test/e2e/quadlet/network.build @@ -0,0 
+1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args "--network=host" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Network=host diff --git a/test/e2e/quadlet/network.quadlet.build b/test/e2e/quadlet/network.quadlet.build new file mode 100644 index 0000000000..6423aa090d --- /dev/null +++ b/test/e2e/quadlet/network.quadlet.build @@ -0,0 +1,8 @@ +## assert-podman-args "--network=systemd-basic" +## assert-key-is "Unit" "Requires" "basic-network.service" +## assert-key-is "Unit" "After" "network-online.target" "basic-network.service" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Network=basic.network diff --git a/test/e2e/quadlet/network.quadlet.container b/test/e2e/quadlet/network.quadlet.container index 27193617f2..a37e56cdf0 100644 --- a/test/e2e/quadlet/network.quadlet.container +++ b/test/e2e/quadlet/network.quadlet.container @@ -1,6 +1,6 @@ ## assert-podman-args "--network=systemd-basic" ## assert-key-is "Unit" "Requires" "basic-network.service" -## assert-key-is "Unit" "After" "basic-network.service" +## assert-key-is "Unit" "After" "network-online.target" "basic-network.service" [Container] Image=localhost/imagename diff --git a/test/e2e/quadlet/no-imagetag.build b/test/e2e/quadlet/no-imagetag.build new file mode 100644 index 0000000000..6f19b80764 --- /dev/null +++ b/test/e2e/quadlet/no-imagetag.build @@ -0,0 +1,5 @@ +## assert-failed +## assert-stderr-contains "no ImageTag key specified" + +[Build] +SetWorkingDirectory=unit diff --git a/test/e2e/quadlet/oneshot.kube b/test/e2e/quadlet/oneshot.kube index 1b96e10337..f743617e85 100644 --- a/test/e2e/quadlet/oneshot.kube +++ b/test/e2e/quadlet/oneshot.kube @@ -1,11 +1,11 @@ ## assert-podman-args "kube" ## assert-podman-args "play" -## assert-podman-final-args-regex .*/podman_test.*/quadlet/deployment.yml +## assert-podman-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml ## assert-podman-args "--replace" ## assert-podman-args "--service-container=true" ## assert-podman-stop-post-args "kube" ## assert-podman-stop-post-args "down" -## assert-podman-stop-post-final-args-regex .*/podman_test.*/quadlet/deployment.yml +## assert-podman-stop-post-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml ## assert-key-is "Unit" "RequiresMountsFor" "%t/containers" ## assert-key-is "Service" "KillMode" "mixed" ## assert-key-is "Service" "Type" "oneshot" diff --git a/test/e2e/quadlet/podmanargs.build b/test/e2e/quadlet/podmanargs.build new file mode 100644 index 0000000000..f64ce76149 --- /dev/null +++ b/test/e2e/quadlet/podmanargs.build @@ -0,0 +1,14 @@ +## assert-podman-args "--foo" +## assert-podman-args "--bar" +## assert-podman-args "--also" +## assert-podman-args "--with-key=value" +## assert-podman-args "--with-space" "yes" + +[Build] +ImageTag=image:latest +SetWorkingDirectory=unit +PodmanArgs="--foo" \ + --bar +PodmanArgs=--also +PodmanArgs=--with-key=value +PodmanArgs=--with-space yes diff --git a/test/e2e/quadlet/pull.build b/test/e2e/quadlet/pull.build new file mode 100644 index 0000000000..7aad2ca20a --- /dev/null +++ b/test/e2e/quadlet/pull.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --pull=never + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Pull=never diff --git a/test/e2e/quadlet/secrets.build 
b/test/e2e/quadlet/secrets.build new file mode 100644 index 0000000000..8d1df9cb23 --- /dev/null +++ b/test/e2e/quadlet/secrets.build @@ -0,0 +1,8 @@ +## assert-podman-args "--secret" "mysecret" +## assert-podman-args "--secret" "id=mysecret,src=mysecret.txt" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Secret=mysecret +Secret=id=mysecret,src=mysecret.txt diff --git a/test/e2e/quadlet/setworkingdirectory-is-abs.build b/test/e2e/quadlet/setworkingdirectory-is-abs.build new file mode 100644 index 0000000000..e408e3b291 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-abs.build @@ -0,0 +1,5 @@ +## assert-podman-final-args /etc/containers/systemd + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=/etc/containers/systemd diff --git a/test/e2e/quadlet/setworkingdirectory-is-archive.build b/test/e2e/quadlet/setworkingdirectory-is-archive.build new file mode 100644 index 0000000000..7e40fe4c60 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-archive.build @@ -0,0 +1,8 @@ +## assert-podman-final-args https://github.com/containers/PodmanHello/archive/refs/heads/main.tar.gz +## assert-podman-args --tag=localhost/podman-hello-archive +## assert-podman-args --file=PodmanHello-main/Containerfile + +[Build] +ImageTag=localhost/podman-hello-archive +File=PodmanHello-main/Containerfile +SetWorkingDirectory=https://github.com/containers/PodmanHello/archive/refs/heads/main.tar.gz diff --git a/test/e2e/quadlet/setworkingdirectory-is-file-abs.build b/test/e2e/quadlet/setworkingdirectory-is-file-abs.build new file mode 100644 index 0000000000..0ebf1918b1 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-file-abs.build @@ -0,0 +1,7 @@ +## assert-podman-args --file=/etc/containers/systemd/Containerfile +## assert-key-is "Service" "WorkingDirectory" "/etc/containers/systemd" + +[Build] +File=/etc/containers/systemd/Containerfile +ImageTag=localhost/imagename +SetWorkingDirectory=file diff --git a/test/e2e/quadlet/setworkingdirectory-is-file-rel.build b/test/e2e/quadlet/setworkingdirectory-is-file-rel.build new file mode 100644 index 0000000000..472d84ffca --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-file-rel.build @@ -0,0 +1,7 @@ +## assert-podman-args --file=Containerfile +## assert-key-is-regex "Service" "WorkingDirectory" "/.*/podman-e2e-.*/subtest-.*/quadlet" + +[Build] +ImageTag=localhost/imagename +File=Containerfile +SetWorkingDirectory=file diff --git a/test/e2e/quadlet/setworkingdirectory-is-git.build b/test/e2e/quadlet/setworkingdirectory-is-git.build new file mode 100644 index 0000000000..f85718f608 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-git.build @@ -0,0 +1,6 @@ +## assert-podman-final-args git://git@git.sr.ht/~emersion/sr.ht-container-compose +## assert-podman-args --tag=localhost/podman-hello + +[Build] +ImageTag=localhost/podman-hello +SetWorkingDirectory=git://git@git.sr.ht/~emersion/sr.ht-container-compose diff --git a/test/e2e/quadlet/setworkingdirectory-is-github.build b/test/e2e/quadlet/setworkingdirectory-is-github.build new file mode 100644 index 0000000000..484bc21cd1 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-github.build @@ -0,0 +1,6 @@ +## assert-podman-final-args github.com/containers/PodmanHello.git +## assert-podman-args --tag=localhost/podman-hello + +[Build] +ImageTag=localhost/podman-hello +SetWorkingDirectory=github.com/containers/PodmanHello.git diff --git a/test/e2e/quadlet/setworkingdirectory-is-https-git.build 
b/test/e2e/quadlet/setworkingdirectory-is-https-git.build new file mode 100644 index 0000000000..ec03f8dd2f --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-https-git.build @@ -0,0 +1,6 @@ +## assert-podman-final-args https://github.com/containers/PodmanHello.git +## assert-podman-args --tag=localhost/podman-hello + +[Build] +ImageTag=localhost/podman-hello +SetWorkingDirectory=https://github.com/containers/PodmanHello.git diff --git a/test/e2e/quadlet/setworkingdirectory-is-rel.build b/test/e2e/quadlet/setworkingdirectory-is-rel.build new file mode 100644 index 0000000000..5df3b7ee32 --- /dev/null +++ b/test/e2e/quadlet/setworkingdirectory-is-rel.build @@ -0,0 +1,6 @@ +## assert-podman-final-args . +## assert-key-is-regex "Service" "WorkingDirectory" "/.*/podman-e2e-.*/subtest-.*/quadlet" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=. diff --git a/test/e2e/quadlet/target.build b/test/e2e/quadlet/target.build new file mode 100644 index 0000000000..6af72c1c82 --- /dev/null +++ b/test/e2e/quadlet/target.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --target=my-app + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Target=my-app diff --git a/test/e2e/quadlet/template@.container b/test/e2e/quadlet/template@.container index 5f17e8bf61..d8b10449da 100644 --- a/test/e2e/quadlet/template@.container +++ b/test/e2e/quadlet/template@.container @@ -1,5 +1,5 @@ ## assert-podman-final-args localhost/imagename -## assert-podman-args "--name=systemd-%P_%I" +## assert-podman-args "--name=systemd-%p_%i" ## assert-symlink want.service.wants/template@default.service ../template@.service ## assert-podman-args --env "FOO=bar" diff --git a/test/e2e/quadlet/template@instance.container b/test/e2e/quadlet/template@instance.container index 0144e5e7ee..ea64082196 100644 --- a/test/e2e/quadlet/template@instance.container +++ b/test/e2e/quadlet/template@instance.container @@ -1,5 +1,5 @@ ## assert-podman-final-args localhost/changed-image -## assert-podman-args "--name=systemd-%P_%I" +## assert-podman-args "--name=systemd-%p_%i" ## assert-symlink want.service.wants/template@instance.service ../template@instance.service ## assert-podman-args --env "FOO=bar" diff --git a/test/e2e/quadlet/tls-verify.build b/test/e2e/quadlet/tls-verify.build new file mode 100644 index 0000000000..0970bbab7a --- /dev/null +++ b/test/e2e/quadlet/tls-verify.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --tls-verify=false + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +TLSVerify=no diff --git a/test/e2e/quadlet/unit-after-override.container b/test/e2e/quadlet/unit-after-override.container new file mode 100644 index 0000000000..f2ebb4ffc3 --- /dev/null +++ b/test/e2e/quadlet/unit-after-override.container @@ -0,0 +1,7 @@ +## assert-last-key-is-regex "Unit" "After" "^$" + +[Unit] +After= + +[Container] +Image=localhost/imagename diff --git a/test/e2e/quadlet/unit-after-override.image b/test/e2e/quadlet/unit-after-override.image new file mode 100644 index 0000000000..87549d3c06 --- /dev/null +++ b/test/e2e/quadlet/unit-after-override.image @@ -0,0 +1,7 @@ +## assert-last-key-is-regex "Unit" "After" "^$" + +[Unit] +After= + +[Image] +Image=localhost/imagename diff --git a/test/e2e/quadlet/variant.build b/test/e2e/quadlet/variant.build new file mode 
100644 index 0000000000..ffc03b1ac7 --- /dev/null +++ b/test/e2e/quadlet/variant.build @@ -0,0 +1,8 @@ +## assert-podman-final-args-regex /.*/podman-e2e-.*/subtest-.*/quadlet +## assert-podman-args --tag=localhost/imagename +## assert-podman-args --variant=arm/v7 + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Variant=arm/v7 diff --git a/test/e2e/quadlet/volume.build b/test/e2e/quadlet/volume.build new file mode 100644 index 0000000000..f63ac66fc6 --- /dev/null +++ b/test/e2e/quadlet/volume.build @@ -0,0 +1,17 @@ +## assert-podman-args -v /host/dir:/container/volume +## assert-podman-args -v /host/dir2:/container/volume2:Z +## assert-podman-args-regex -v .*/podman-e2e-.*/subtest-.*/quadlet/host/dir3:/container/volume3 +## assert-podman-args -v named:/container/named +## assert-podman-args -v systemd-quadlet:/container/quadlet +## assert-podman-args -v %h/container:/container/volume4 + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Volume=/host/dir:/container/volume +Volume=/host/dir2:/container/volume2:Z +Volume=./host/dir3:/container/volume3 +Volume=/container/empty +Volume=named:/container/named +Volume=quadlet.volume:/container/quadlet +Volume=%h/container:/container/volume4 diff --git a/test/e2e/quadlet/volume.container b/test/e2e/quadlet/volume.container index 934ce6f335..b3d033c6f8 100644 --- a/test/e2e/quadlet/volume.container +++ b/test/e2e/quadlet/volume.container @@ -1,6 +1,6 @@ ## assert-podman-args -v /host/dir:/container/volume ## assert-podman-args -v /host/dir2:/container/volume2:Z -## assert-podman-args-regex -v .*/podman_test.*/quadlet/host/dir3:/container/volume3 +## assert-podman-args-regex -v .*/podman-e2e-.*/subtest-.*/quadlet/host/dir3:/container/volume3 ## assert-podman-args -v named:/container/named ## assert-podman-args -v systemd-quadlet:/container/quadlet ## assert-podman-args -v %h/container:/container/volume4 diff --git a/test/e2e/quadlet/volume.pod b/test/e2e/quadlet/volume.pod index 7950b0ab54..b2fa6117fa 100644 --- a/test/e2e/quadlet/volume.pod +++ b/test/e2e/quadlet/volume.pod @@ -1,6 +1,6 @@ ## assert-podman-pre-args -v /host/dir:/container/volume ## assert-podman-pre-args -v /host/dir2:/container/volume2:Z -## assert-podman-pre-args-regex -v .*/podman_test.*/quadlet/host/dir3:/container/volume3 +## assert-podman-pre-args-regex -v .*/podman-e2e-.*/subtest-.*/quadlet/host/dir3:/container/volume3 ## assert-podman-pre-args -v named:/container/named ## assert-podman-pre-args -v systemd-quadlet:/container/quadlet ## assert-podman-pre-args -v %h/container:/container/volume4 diff --git a/test/e2e/quadlet/volume.quadlet.build b/test/e2e/quadlet/volume.quadlet.build new file mode 100644 index 0000000000..6cb57bda13 --- /dev/null +++ b/test/e2e/quadlet/volume.quadlet.build @@ -0,0 +1,8 @@ +## assert-podman-args "-v" "systemd-basic:/volume/basic" +## assert-key-is "Unit" "Requires" "basic-volume.service" +## assert-key-is "Unit" "After" "network-online.target" "basic-volume.service" + +[Build] +ImageTag=localhost/imagename +SetWorkingDirectory=unit +Volume=basic.volume:/volume/basic diff --git a/test/e2e/quadlet/workingdir-unit.kube b/test/e2e/quadlet/workingdir-unit.kube index ef6adef606..ffc7d04849 100644 --- a/test/e2e/quadlet/workingdir-unit.kube +++ b/test/e2e/quadlet/workingdir-unit.kube @@ -1,4 +1,4 @@ -## assert-key-is-regex "Service" "WorkingDirectory" ".*/podman_test.*/quadlet" +## assert-key-is-regex "Service" "WorkingDirectory" ".*/podman-e2e-.*/subtest-.*/quadlet" [Kube] Yaml=deployment.yml diff --git 
a/test/e2e/quadlet/workingdir-yaml-rel.kube b/test/e2e/quadlet/workingdir-yaml-rel.kube index ddde1c361d..5a5823833b 100644 --- a/test/e2e/quadlet/workingdir-yaml-rel.kube +++ b/test/e2e/quadlet/workingdir-yaml-rel.kube @@ -1,4 +1,4 @@ -## assert-key-is-regex "Service" "WorkingDirectory" ".*/podman_test.*/quadlet/myservice" +## assert-key-is-regex "Service" "WorkingDirectory" ".*/podman-e2e-.*/subtest-.*/quadlet/myservice" [Kube] Yaml=./myservice/deployment.yml diff --git a/test/e2e/quadlet_test.go b/test/e2e/quadlet_test.go index 238e02326c..5b97493252 100644 --- a/test/e2e/quadlet_test.go +++ b/test/e2e/quadlet_test.go @@ -50,6 +50,8 @@ func loadQuadletTestcase(path string) *quadletTestcase { service += "-network" case ".image": service += "-image" + case ".build": + service += "-build" case ".pod": service += "-pod" } @@ -172,6 +174,24 @@ func (t *quadletTestcase) assertKeyIsRegex(args []string, unit *parser.UnitFile) return true } +func (t *quadletTestcase) assertLastKeyIsRegex(args []string, unit *parser.UnitFile) bool { + Expect(len(args)).To(BeNumerically(">=", 3)) + group := args[0] + key := args[1] + regex := args[2] + + value, ok := unit.LookupLast(group, key) + if !ok { + return false + } + + matched, err := regexp.MatchString(regex, value) + if err != nil || !matched { + return false + } + return true +} + func (t *quadletTestcase) assertKeyContains(args []string, unit *parser.UnitFile) bool { Expect(args).To(HaveLen(3)) group := args[0] @@ -469,6 +489,8 @@ func (t *quadletTestcase) doAssert(check []string, unit *parser.UnitFile, sessio ok = t.assertKeyIsRegex(args, unit) case "assert-key-contains": ok = t.assertKeyContains(args, unit) + case "assert-last-key-is-regex": + ok = t.assertLastKeyIsRegex(args, unit) case "assert-podman-args": ok = t.assertStartPodmanArgs(args, unit) case "assert-podman-args-regex": @@ -586,6 +608,44 @@ var _ = Describe("quadlet system generator", func() { err error generatedDir string quadletDir string + + runQuadletTestCase = func(fileName string, exitCode int, errString string) { + testcase := loadQuadletTestcase(filepath.Join("quadlet", fileName)) + + // Write the tested file to the quadlet dir + err = os.WriteFile(filepath.Join(quadletDir, fileName), testcase.data, 0644) + Expect(err).ToNot(HaveOccurred()) + + // Also copy any extra snippets + snippetdirs := []string{fileName + ".d"} + if ok, genericFileName := getGenericTemplateFile(fileName); ok { + snippetdirs = append(snippetdirs, genericFileName+".d") + } + for _, snippetdir := range snippetdirs { + dotdDir := filepath.Join("quadlet", snippetdir) + if s, err := os.Stat(dotdDir); err == nil && s.IsDir() { + dotdDirDest := filepath.Join(quadletDir, snippetdir) + err = os.Mkdir(dotdDirDest, os.ModePerm) + Expect(err).ToNot(HaveOccurred()) + err = CopyDirectory(dotdDir, dotdDirDest) + Expect(err).ToNot(HaveOccurred()) + } + } + + // Run quadlet to convert the file + session := podmanTest.Quadlet([]string{"--user", "--no-kmsg-log", generatedDir}, quadletDir) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(exitCode)) + + // Print any stderr output + errs := session.ErrorToString() + if errs != "" { + GinkgoWriter.Println("error:", session.ErrorToString()) + } + Expect(errs).Should(ContainSubstring(errString)) + + testcase.check(generatedDir, session) + } ) BeforeEach(func() { @@ -695,12 +755,12 @@ BOGUS=foo "---basic.service---", "## assert-podman-args \"kube\"", "## assert-podman-args \"play\"", - "## assert-podman-final-args-regex .*/podman_test.*/quadlet/deployment.yml", + "## 
assert-podman-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml", "## assert-podman-args \"--replace\"", "## assert-podman-args \"--service-container=true\"", "## assert-podman-stop-post-args \"kube\"", "## assert-podman-stop-post-args \"down\"", - "## assert-podman-stop-post-final-args-regex .*/podman_test.*/quadlet/deployment.yml", + "## assert-podman-stop-post-final-args-regex .*/podman-e2e-.*/subtest-.*/quadlet/deployment.yml", "## assert-key-is \"Unit\" \"RequiresMountsFor\" \"%t/containers\"", "## assert-key-is \"Service\" \"KillMode\" \"mixed\"", "## assert-key-is \"Service\" \"Type\" \"notify\"", @@ -727,43 +787,7 @@ BOGUS=foo }) DescribeTable("Running quadlet test case", - func(fileName string, exitCode int, errString string) { - testcase := loadQuadletTestcase(filepath.Join("quadlet", fileName)) - - // Write the tested file to the quadlet dir - err = os.WriteFile(filepath.Join(quadletDir, fileName), testcase.data, 0644) - Expect(err).ToNot(HaveOccurred()) - - // Also copy any extra snippets - snippetdirs := []string{fileName + ".d"} - if ok, genericFileName := getGenericTemplateFile(fileName); ok { - snippetdirs = append(snippetdirs, genericFileName+".d") - } - for _, snippetdir := range snippetdirs { - dotdDir := filepath.Join("quadlet", snippetdir) - if s, err := os.Stat(dotdDir); err == nil && s.IsDir() { - dotdDirDest := filepath.Join(quadletDir, snippetdir) - err = os.Mkdir(dotdDirDest, os.ModePerm) - Expect(err).ToNot(HaveOccurred()) - err = CopyDirectory(dotdDir, dotdDirDest) - Expect(err).ToNot(HaveOccurred()) - } - } - - // Run quadlet to convert the file - session := podmanTest.Quadlet([]string{"--user", "--no-kmsg-log", generatedDir}, quadletDir) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(exitCode)) - - // Print any stderr output - errs := session.ErrorToString() - if errs != "" { - GinkgoWriter.Println("error:", session.ErrorToString()) - } - Expect(errs).Should(ContainSubstring(errString)) - - testcase.check(generatedDir, session) - }, + runQuadletTestCase, Entry("Basic container", "basic.container", 0, ""), Entry("annotation.container", "annotation.container", 0, ""), Entry("autoupdate.container", "autoupdate.container", 0, ""), @@ -783,6 +807,7 @@ BOGUS=foo Entry("entrypoint.container", "entrypoint.container", 0, ""), Entry("escapes.container", "escapes.container", 0, ""), Entry("exec.container", "exec.container", 0, ""), + Entry("group-add.container", "group-add.container", 0, ""), Entry("health.container", "health.container", 0, ""), Entry("hostname.container", "hostname.container", 0, ""), Entry("idmapping.container", "idmapping.container", 0, ""), @@ -793,6 +818,7 @@ BOGUS=foo Entry("label.container", "label.container", 0, ""), Entry("line-continuation-whitespace.container", "line-continuation-whitespace.container", 0, ""), Entry("logdriver.container", "logdriver.container", 0, ""), + Entry("logopt.container", "logopt.container", 0, ""), Entry("mask.container", "mask.container", 0, ""), Entry("mount.container", "mount.container", 0, ""), Entry("name.container", "name.container", 0, ""), @@ -846,6 +872,7 @@ BOGUS=foo Entry("merged-override.container", "merged-override.container", 0, ""), Entry("template@.container", "template@.container", 0, ""), Entry("template@instance.container", "template@instance.container", 0, ""), + Entry("Unit After Override", "unit-after-override.container", 0, ""), Entry("basic.volume", "basic.volume", 0, ""), Entry("device-copy.volume", "device-copy.volume", 0, ""), @@ -858,12 +885,15 @@ 
BOGUS=foo Entry("image-no-image.volume", "image-no-image.volume", 1, "converting \"image-no-image.volume\": the key Image is mandatory when using the image driver"), Entry("Volume - global args", "globalargs.volume", 0, ""), Entry("Volume - Containers Conf Modules", "containersconfmodule.volume", 0, ""), + Entry("Volume - Quadlet image (.build) not found", "build-not-found.quadlet.volume", 1, "converting \"build-not-found.quadlet.volume\": requested Quadlet image not-found.build was not found"), + Entry("Volume - Quadlet image (.image) not found", "image-not-found.quadlet.volume", 1, "converting \"image-not-found.quadlet.volume\": requested Quadlet image not-found.image was not found"), Entry("Absolute Path", "absolute.path.kube", 0, ""), Entry("Basic kube", "basic.kube", 0, ""), Entry("Kube - ConfigMap", "configmap.kube", 0, ""), Entry("Kube - Exit Code Propagation", "exit_code_propagation.kube", 0, ""), Entry("Kube - Logdriver", "logdriver.kube", 0, ""), + Entry("Kube - Logopt", "logopt.kube", 0, ""), Entry("Kube - Network", "network.kube", 0, ""), Entry("Kube - PodmanArgs", "podmanargs.kube", 0, ""), Entry("Kube - Publish IPv4 ports", "ports.kube", 0, ""), @@ -920,6 +950,45 @@ BOGUS=foo Entry("Image - Arch and OS", "arch-os.image", 0, ""), Entry("Image - global args", "globalargs.image", 0, ""), Entry("Image - Containers Conf Modules", "containersconfmodule.image", 0, ""), + Entry("Image - Unit After Override", "unit-after-override.image", 0, ""), + + Entry("Build - Basic", "basic.build", 0, ""), + Entry("Build - Annotation Key", "annotation.build", 0, ""), + Entry("Build - Arch Key", "arch.build", 0, ""), + Entry("Build - AuthFile Key", "authfile.build", 0, ""), + Entry("Build - DNS Key", "dns.build", 0, ""), + Entry("Build - DNSOptions Key", "dns-options.build", 0, ""), + Entry("Build - DNSSearch Key", "dns-search.build", 0, ""), + Entry("Build - Environment Key", "env.build", 0, ""), + Entry("Build - File Key absolute", "file-abs.build", 0, ""), + Entry("Build - File Key relative", "file-rel.build", 0, ""), + Entry("Build - File Key relative no WD", "file-rel-no-wd.build", 1, "converting \"file-rel-no-wd.build\": relative path in File key requires SetWorkingDirectory key to be set"), + Entry("Build - File Key HTTP(S) URL", "file-https.build", 0, ""), + Entry("Build - ForceRM Key", "force-rm.build", 0, ""), + Entry("Build - GlobalArgs", "globalargs.build", 0, ""), + Entry("Build - GroupAdd Key", "group-add.build", 0, ""), + Entry("Build - Containers Conf Modules", "containersconfmodule.build", 0, ""), + Entry("Build - Label Key", "label.build", 0, ""), + Entry("Build - Neither WorkingDirectory nor File Key", "neither-workingdirectory-nor-file.build", 1, "converting \"neither-workingdirectory-nor-file.build\": neither SetWorkingDirectory, nor File key specified"), + Entry("Build - Network Key host", "network.build", 0, ""), + Entry("Build - Network Key quadlet", "network.quadlet.build", 0, ""), + Entry("Build - No ImageTag Key", "no-imagetag.build", 1, "converting \"no-imagetag.build\": no ImageTag key specified"), + Entry("Build - PodmanArgs", "podmanargs.build", 0, ""), + Entry("Build - Pull Key", "pull.build", 0, ""), + Entry("Build - Secrets", "secrets.build", 0, ""), + Entry("Build - SetWorkingDirectory is absolute path", "setworkingdirectory-is-abs.build", 0, ""), + Entry("Build - SetWorkingDirectory is absolute File= path", "setworkingdirectory-is-file-abs.build", 0, ""), + Entry("Build - SetWorkingDirectory is relative path", "setworkingdirectory-is-rel.build", 0, ""), + 
Entry("Build - SetWorkingDirectory is relative File= path", "setworkingdirectory-is-file-rel.build", 0, ""), + Entry("Build - SetWorkingDirectory is https://.git URL", "setworkingdirectory-is-https-git.build", 0, ""), + Entry("Build - SetWorkingDirectory is git:// URL", "setworkingdirectory-is-git.build", 0, ""), + Entry("Build - SetWorkingDirectory is github.com URL", "setworkingdirectory-is-github.build", 0, ""), + Entry("Build - SetWorkingDirectory is archive URL", "setworkingdirectory-is-archive.build", 0, ""), + Entry("Build - Target Key", "target.build", 0, ""), + Entry("Build - TLSVerify Key", "tls-verify.build", 0, ""), + Entry("Build - Variant Key", "variant.build", 0, ""), + Entry("Build - Volume Key", "volume.build", 0, ""), + Entry("Build - Volume Key quadlet", "volume.quadlet.build", 0, ""), Entry("basic.pod", "basic.pod", 0, ""), Entry("name.pod", "name.pod", 0, ""), @@ -929,4 +998,19 @@ BOGUS=foo Entry("volume.pod", "volume.pod", 0, ""), ) + DescribeTable("Running quadlet test case with dependencies", + func(fileName string, exitCode int, errString string, dependencyFiles []string) { + // Write additional files this test depends on to the quadlet dir + for _, dependencyFileName := range dependencyFiles { + dependencyTestCase := loadQuadletTestcase(filepath.Join("quadlet", dependencyFileName)) + err = os.WriteFile(filepath.Join(quadletDir, dependencyFileName), dependencyTestCase.data, 0644) + Expect(err).ToNot(HaveOccurred()) + } + + runQuadletTestCase(fileName, exitCode, errString) + }, + Entry("Volume - Quadlet image (.build)", "build.quadlet.volume", 0, "", []string{"basic.build"}), + Entry("Volume - Quadlet image (.image)", "image.quadlet.volume", 0, "", []string{"basic.image"}), + ) + }) diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go index ef82e6c73d..27698e937c 100644 --- a/test/e2e/rename_test.go +++ b/test/e2e/rename_test.go @@ -13,7 +13,7 @@ var _ = Describe("podman rename", func() { It("podman rename on non-existent container", func() { session := podmanTest.Podman([]string{"rename", "doesNotExist", "aNewName"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "doesNotExist" found: no such container`)) }) It("Podman rename on existing container with bad name", func() { @@ -25,7 +25,7 @@ var _ = Describe("podman rename", func() { newName := "invalid<>:char" rename := podmanTest.Podman([]string{"rename", ctrName, newName}) rename.WaitWithDefaultTimeout() - Expect(rename).To(ExitWithError()) + Expect(rename).To(ExitWithError(125, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: invalid argument")) ps := podmanTest.Podman([]string{"ps", "-aq", "--filter", fmt.Sprintf("name=%s", ctrName), "--format", "{{ .Names }}"}) ps.WaitWithDefaultTimeout() diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go index d43f6fc235..53872b1818 100644 --- a/test/e2e/restart_test.go +++ b/test/e2e/restart_test.go @@ -2,6 +2,7 @@ package integration import ( "fmt" + "path/filepath" "time" . 
"github.com/containers/podman/v5/test/utils" @@ -15,7 +16,7 @@ var _ = Describe("Podman restart", func() { It("podman restart bogus container", func() { session := podmanTest.Podman([]string{"start", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "123" found: no such container`)) }) It("podman restart stopped container by name", func() { @@ -192,12 +193,13 @@ var _ = Describe("Podman restart", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - testCmd := []string{"exec", "host-restart-test", "sh", "-c", "wc -l < /etc/hosts"} + testCmd := []string{"exec", "host-restart-test", "cat", "/etc/hosts"} // before restart beforeRestart := podmanTest.Podman(testCmd) beforeRestart.WaitWithDefaultTimeout() Expect(beforeRestart).Should(ExitCleanly()) + nHostLines := len(beforeRestart.OutputToStringArray()) session = podmanTest.Podman([]string{"restart", "host-restart-test"}) session.WaitWithDefaultTimeout() @@ -208,7 +210,8 @@ var _ = Describe("Podman restart", func() { Expect(afterRestart).Should(ExitCleanly()) // line count should be equal - Expect(beforeRestart.OutputToString()).To(Equal(afterRestart.OutputToString())) + Expect(afterRestart.OutputToStringArray()).To(HaveLen(nHostLines), + "number of host lines post-restart == number of lines pre-restart") }) It("podman restart all stopped containers with --all", func() { @@ -232,10 +235,9 @@ var _ = Describe("Podman restart", func() { }) It("podman restart --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "cid" + cidFile := filepath.Join(tempdir, "cid") - session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, ALPINE, "top"}) + session := podmanTest.Podman([]string{"create", "--cidfile", cidFile, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid := session.OutputToStringArray()[0] @@ -244,7 +246,7 @@ var _ = Describe("Podman restart", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - result := podmanTest.Podman([]string{"restart", "--cidfile", tmpFile}) + result := podmanTest.Podman([]string{"restart", "--cidfile", cidFile}) result.WaitWithDefaultTimeout() // FIXME - #20196: Cannot use ExitCleanly() Expect(result).Should(Exit(0)) @@ -253,23 +255,22 @@ var _ = Describe("Podman restart", func() { }) It("podman restart multiple --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile1 := tmpDir + "cid-1" - tmpFile2 := tmpDir + "cid-2" + cidFile1 := filepath.Join(tempdir, "cid-1") + cidFile2 := filepath.Join(tempdir, "cid-2") - session := podmanTest.Podman([]string{"run", "--cidfile", tmpFile1, "-d", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "--cidfile", cidFile1, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid1 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(1)) - session = podmanTest.Podman([]string{"run", "--cidfile", tmpFile2, "-d", ALPINE, "top"}) + session = podmanTest.Podman([]string{"run", "--cidfile", cidFile2, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid2 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(2)) - result := podmanTest.Podman([]string{"restart", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + result := podmanTest.Podman([]string{"restart", "--cidfile", cidFile1, "--cidfile", cidFile2}) 
result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -282,20 +283,19 @@ var _ = Describe("Podman restart", func() { SkipIfRemote("--latest flag n/a") result := podmanTest.Podman([]string{"restart", "--cidfile", "foobar", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"restart", "--cidfile", "foobar", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"restart", "--cidfile", "foobar", "--all", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) + result = podmanTest.Podman([]string{"restart", "--latest", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all and --latest cannot be used together")) }) It("podman restart --filter", func() { @@ -317,7 +317,7 @@ var _ = Describe("Podman restart", func() { session1 = podmanTest.Podman([]string{"restart", cid1, "-f", "status=test"}) session1.WaitWithDefaultTimeout() - Expect(session1).Should(Exit(125)) + Expect(session1).Should(ExitWithError(125, "--filter takes no arguments")) session1 = podmanTest.Podman([]string{"restart", "-a", "--filter", fmt.Sprintf("id=%swrongid", shortCid3)}) session1.WaitWithDefaultTimeout() diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go index 17dcc514ab..7a734c9285 100644 --- a/test/e2e/rm_test.go +++ b/test/e2e/rm_test.go @@ -2,11 +2,11 @@ package integration import ( "fmt" + "path/filepath" . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman rm", func() { @@ -28,8 +28,7 @@ var _ = Describe("Podman rm", func() { result := podmanTest.Podman([]string{"rm", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(2)) - Expect(result.ErrorToString()).To(ContainSubstring("containers cannot be removed without force")) + Expect(result).Should(ExitWithError(2, "containers cannot be removed without force")) }) It("podman rm created container", func() { @@ -122,16 +121,15 @@ var _ = Describe("Podman rm", func() { }) It("podman rm --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "cid" + cidFile := filepath.Join(tempdir, "cid") - session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, ALPINE, "ls"}) + session := podmanTest.Podman([]string{"create", "--cidfile", cidFile, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(1)) - result := podmanTest.Podman([]string{"rm", "--cidfile", tmpFile}) + result := podmanTest.Podman([]string{"rm", "--cidfile", cidFile}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -140,23 +138,22 @@ var _ = Describe("Podman rm", func() { }) It("podman rm multiple --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile1 := tmpDir + "cid-1" - tmpFile2 := tmpDir + "cid-2" + cidFile1 := filepath.Join(tempdir, "cid-1") + cidFile2 := filepath.Join(tempdir, "cid-2") - session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile1, ALPINE, "ls"}) + session := podmanTest.Podman([]string{"create", "--cidfile", cidFile1, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid1 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(1)) - session = podmanTest.Podman([]string{"create", "--cidfile", tmpFile2, ALPINE, "ls"}) + session = podmanTest.Podman([]string{"create", "--cidfile", cidFile2, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid2 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(2)) - result := podmanTest.Podman([]string{"rm", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + result := podmanTest.Podman([]string{"rm", "--cidfile", cidFile1, "--cidfile", cidFile2}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -170,23 +167,19 @@ var _ = Describe("Podman rm", func() { result := podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("--all, --latest, and --cidfile cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) result = podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("--all, --latest, and --cidfile cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) result = podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--all", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("--all, --latest, and --cidfile cannot be used 
together")) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) result = podmanTest.Podman([]string{"rm", "--latest", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) - Expect(result.ErrorToString()).To(ContainSubstring("--all and --latest cannot be used together")) + Expect(result).Should(ExitWithError(125, "--all and --latest cannot be used together")) }) It("podman rm --all", func() { @@ -215,8 +208,7 @@ var _ = Describe("Podman rm", func() { session = podmanTest.Podman([]string{"rm", "bogus", cid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("\"bogus\" found: no such container")) + Expect(session).Should(ExitWithError(1, `no container with ID or name "bogus" found: no such container`)) if IsRemote() { Expect(session.OutputToString()).To(BeEquivalentTo(cid)) } @@ -233,8 +225,7 @@ var _ = Describe("Podman rm", func() { It("podman rm bogus container", func() { session := podmanTest.Podman([]string{"rm", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("\"bogus\" found: no such container")) + Expect(session).Should(ExitWithError(1, `no container with ID or name "bogus" found: no such container`)) }) It("podman rm bogus container and a running container", func() { @@ -244,13 +235,11 @@ var _ = Describe("Podman rm", func() { session = podmanTest.Podman([]string{"rm", "bogus", "test1"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("\"bogus\" found: no such container")) + Expect(session).Should(ExitWithError(1, `no container with ID or name "bogus" found: no such container`)) session = podmanTest.Podman([]string{"rm", "test1", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("\"bogus\" found: no such container")) + Expect(session).Should(ExitWithError(1, `no container with ID or name "bogus" found: no such container`)) }) It("podman rm --ignore bogus container and a running container", func() { @@ -260,8 +249,7 @@ var _ = Describe("Podman rm", func() { session = podmanTest.Podman([]string{"rm", "--ignore", "test1", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(2)) - Expect(session.ErrorToString()).To(ContainSubstring("containers cannot be removed without force")) + Expect(session).Should(ExitWithError(2, "containers cannot be removed without force")) session = podmanTest.Podman([]string{"rm", "-t", "0", "--force", "--ignore", "bogus", "test1"}) session.WaitWithDefaultTimeout() @@ -293,8 +281,7 @@ var _ = Describe("Podman rm", func() { session1 = podmanTest.Podman([]string{"rm", cid1, "-f", "--filter", "status=running"}) session1.WaitWithDefaultTimeout() - Expect(session1).Should(Exit(125)) - Expect(session1.ErrorToString()).To(ContainSubstring("--filter takes no arguments")) + Expect(session1).Should(ExitWithError(125, "--filter takes no arguments")) session1 = podmanTest.Podman([]string{"rm", "-a", "-f", "--filter", fmt.Sprintf("id=%swrongid", shortCid3)}) session1.WaitWithDefaultTimeout() diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go index 5caec5f6da..bc5d2d1fa2 100644 --- a/test/e2e/rmi_test.go +++ b/test/e2e/rmi_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman rmi", func() { @@ -15,7 +14,7 @@ var _ = Describe("Podman rmi", func() { It("podman rmi bogus image", func() { session := podmanTest.Podman([]string{"rmi", "debian:6.0.10"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "debian:6.0.10: image not known")) }) @@ -72,10 +71,10 @@ var _ = Describe("Podman rmi", func() { It("podman rmi image with tags by ID cannot be done without force", func() { podmanTest.AddImageToRWStore(CIRROS_IMAGE) - setup := podmanTest.Podman([]string{"images", "-q", CIRROS_IMAGE}) + setup := podmanTest.Podman([]string{"images", "-q", "--no-trunc", CIRROS_IMAGE}) setup.WaitWithDefaultTimeout() Expect(setup).Should(ExitCleanly()) - cirrosID := setup.OutputToString() + cirrosID := setup.OutputToString()[7:] session := podmanTest.Podman([]string{"tag", "cirros", "foo:bar", "foo"}) session.WaitWithDefaultTimeout() @@ -84,7 +83,7 @@ var _ = Describe("Podman rmi", func() { // Trying without --force should fail result := podmanTest.Podman([]string{"rmi", cirrosID}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, fmt.Sprintf(`unable to delete image "%s" by ID with more than one tag ([localhost/foo:latest localhost/foo:bar %s]): please force removal`, cirrosID, CIRROS_IMAGE))) // With --force it should work resultForce := podmanTest.Podman([]string{"rmi", "-f", cirrosID}) @@ -93,7 +92,6 @@ var _ = Describe("Podman rmi", func() { }) It("podman rmi image that is a parent of another image", func() { - Skip("I need help with this one. i don't understand what is going on") podmanTest.AddImageToRWStore(CIRROS_IMAGE) session := podmanTest.Podman([]string{"run", "--name", "c_test", CIRROS_IMAGE, "true"}) session.WaitWithDefaultTimeout() @@ -110,22 +108,18 @@ var _ = Describe("Podman rmi", func() { session = podmanTest.Podman([]string{"rmi", CIRROS_IMAGE}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"images", "-q"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToStringArray()).To(HaveLen(12)) + Expect(session.OutputToString()).To(Equal("Untagged: " + CIRROS_IMAGE)) session = podmanTest.Podman([]string{"images", "--sort", "created", "--format", "{{.Id}}", "--all"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(session.OutputToStringArray()).To(HaveLen(13), - "Output from 'podman images -q -a'") + Expect(session.OutputToStringArray()).To(HaveLen(len(CACHE_IMAGES)+1), + "Output from 'podman images'") untaggedImg := session.OutputToStringArray()[1] session = podmanTest.Podman([]string{"rmi", "-f", untaggedImg}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(2), "UntaggedImg is '%s'", untaggedImg) + Expect(session).Should(ExitWithError(125, fmt.Sprintf(`cannot remove read-only image "%s"`, untaggedImg))) }) It("podman rmi image that is created from another named imaged", func() { @@ -253,8 +247,7 @@ RUN find $LOCAL It("podman image rm is the same as rmi", func() { session := podmanTest.Podman([]string{"image", "rm"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("image name or ID must be specified")) + Expect(session).Should(ExitWithError(125, "image name or ID must be specified")) }) It("podman image rm - concurrent with shared layers", func() { diff --git 
a/test/e2e/run_apparmor_test.go b/test/e2e/run_apparmor_test.go index 3d4f5ed63c..152efec8f6 100644 --- a/test/e2e/run_apparmor_test.go +++ b/test/e2e/run_apparmor_test.go @@ -98,7 +98,7 @@ profile aa-test-profile flags=(attach_disconnected,mediate_deleted) { skipIfAppArmorDisabled() session := podmanTest.Podman([]string{"run", "--security-opt", "apparmor=invalid", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(126, `AppArmor profile "invalid" specified but not loaded`)) }) It("podman run apparmor unconfined", func() { @@ -118,7 +118,7 @@ profile aa-test-profile flags=(attach_disconnected,mediate_deleted) { // Should fail if user specifies apparmor on disabled system session := podmanTest.Podman([]string{"create", "--security-opt", fmt.Sprintf("apparmor=%s", apparmor.Profile), ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, fmt.Sprintf(`apparmor profile "%s" specified, but Apparmor is not enabled on this system`, apparmor.Profile))) }) It("podman run apparmor disabled no default", func() { diff --git a/test/e2e/run_cpu_test.go b/test/e2e/run_cpu_test.go index 9115937818..13e5586baa 100644 --- a/test/e2e/run_cpu_test.go +++ b/test/e2e/run_cpu_test.go @@ -109,13 +109,13 @@ var _ = Describe("Podman run cpu", func() { It("podman run cpus and cpu-period", func() { result := podmanTest.Podman([]string{"run", "--rm", "--cpu-period=5000", "--cpus=0.5", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "--cpu-period and --cpus cannot be set together")) }) It("podman run cpus and cpu-quota", func() { result := podmanTest.Podman([]string{"run", "--rm", "--cpu-quota=5000", "--cpus=0.5", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "--cpu-quota and --cpus cannot be set together")) }) It("podman run invalid cpu-rt-period with cgroupsv2", func() { diff --git a/test/e2e/run_device_test.go b/test/e2e/run_device_test.go index 129f3cdaef..ef6fe8b486 100644 --- a/test/e2e/run_device_test.go +++ b/test/e2e/run_device_test.go @@ -9,7 +9,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) func createContainersConfFileWithDevices(pTest *PodmanTestIntegration, devices string) { @@ -30,7 +29,7 @@ var _ = Describe("Podman run device", func() { It("podman run bad device test", func() { session := podmanTest.Podman([]string{"run", "-q", "--device", "/dev/baddevice", ALPINE, "true"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "stat /dev/baddevice: no such file or directory")) }) It("podman run device test", func() { @@ -38,7 +37,8 @@ var _ = Describe("Podman run device", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) if !isRootless() { - session = podmanTest.Podman([]string{"run", "-q", "--security-opt", "label=disable", "--device", "/dev/kmsg", "--cap-add", "SYS_ADMIN", ALPINE, "head", "-n", "1", "/dev/kmsg"}) + // Kernel 6.9.0 (2024-03) requires SYSLOG + session = podmanTest.Podman([]string{"run", "-q", "--security-opt", "label=disable", "--device", "/dev/kmsg", "--cap-add", "SYS_ADMIN,SYSLOG", ALPINE, "head", "-n", "1", "/dev/kmsg"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) } @@ -67,7 +67,7 @@ var _ = Describe("Podman run device", func() { It("podman run device rename and bad permission test", func() { session := podmanTest.Podman([]string{"run", "-q", "--security-opt", "label=disable", "--device", "/dev/kmsg:/dev/kmsg1:rd", ALPINE, "true"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "invalid device mode: rd")) }) It("podman run device host device and container device parameter are directories", func() { @@ -94,7 +94,8 @@ var _ = Describe("Podman run device", func() { // verify --privileged is required session2 := podmanTest.Podman([]string{"run", ALPINE, "test", "-c", "/dev/kmsg"}) session2.WaitWithDefaultTimeout() - Expect(session2).Should(Exit(1)) + Expect(session2).Should(ExitWithError(1, "")) + Expect(session2.OutputToString()).To(BeEmpty()) }) It("podman run CDI device test", func() { diff --git a/test/e2e/run_dns_test.go b/test/e2e/run_dns_test.go index d6f8a4b09a..126c17a741 100644 --- a/test/e2e/run_dns_test.go +++ b/test/e2e/run_dns_test.go @@ -25,7 +25,7 @@ var _ = Describe("Podman run dns", func() { It("podman run add bad dns server", func() { session := podmanTest.Podman([]string{"run", "--dns=foobar", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "foobar is not an ip address")) }) It("podman run add dns server", func() { @@ -46,7 +46,7 @@ var _ = Describe("Podman run dns", func() { It("podman run add bad host", func() { session := podmanTest.Podman([]string{"run", "--add-host=foo:1.2", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `invalid IP address in add-host: "1.2"`)) }) It("podman run add host", func() { @@ -60,7 +60,7 @@ var _ = Describe("Podman run dns", func() { session := podmanTest.Podman([]string{"run", "--hostname=foobar", ALPINE, "cat", "/etc/hostname"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(Equal("foobar")) + Expect(string(session.Out.Contents())).To(Equal("foobar\n")) session = podmanTest.Podman([]string{"run", "--hostname=foobar", ALPINE, "hostname"}) session.WaitWithDefaultTimeout() @@ -78,15 +78,15 @@ var _ = Describe("Podman run dns", func() { It("podman run mutually excludes --dns* and 
--network", func() { session := podmanTest.Podman([]string{"run", "--dns=1.2.3.4", "--network", "container:ALPINE", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "conflicting options: dns and the network mode: container")) session = podmanTest.Podman([]string{"run", "--dns-opt=1.2.3.4", "--network", "container:ALPINE", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "conflicting options: dns and the network mode: container")) session = podmanTest.Podman([]string{"run", "--dns-search=foobar.com", "--network", "none", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "conflicting options: dns and the network mode: none")) session = podmanTest.Podman([]string{"run", "--dns=1.2.3.4", "--network", "host", ALPINE}) session.WaitWithDefaultTimeout() diff --git a/test/e2e/run_entrypoint_test.go b/test/e2e/run_entrypoint_test.go index e39c377cce..687847c1f3 100644 --- a/test/e2e/run_entrypoint_test.go +++ b/test/e2e/run_entrypoint_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run entrypoint", func() { @@ -17,7 +16,7 @@ CMD [] podmanTest.BuildImage(dockerfile, "foobar.com/entrypoint:latest", "false") session := podmanTest.Podman([]string{"run", "foobar.com/entrypoint:latest"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Or(Exit(126), Exit(127))) + Expect(session).Should(ExitWithError(126, "open executable: Operation not permitted: OCI permission denied")) }) It("podman run entrypoint == [\"\"]", func() { diff --git a/test/e2e/run_env_test.go b/test/e2e/run_env_test.go index e3989fad53..bc459b7475 100644 --- a/test/e2e/run_env_test.go +++ b/test/e2e/run_env_test.go @@ -6,7 +6,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run", func() { @@ -54,7 +53,7 @@ var _ = Describe("Podman run", func() { session = podmanTest.Podman([]string{"run", "--rm", "--env", "FOO", ALPINE, "printenv", "FOO"}) session.WaitWithDefaultTimeout() Expect(session.OutputToString()).To(BeEmpty()) - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "printenv"}) session.WaitWithDefaultTimeout() @@ -90,8 +89,7 @@ ENV hello=world session.WaitWithDefaultTimeout() if IsRemote() { // podman-remote does not support --env-host - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("unknown flag: --env-host")) + Expect(session).Should(ExitWithError(125, "unknown flag: --env-host")) return } Expect(session).Should(ExitCleanly()) @@ -125,7 +123,7 @@ ENV hello=world session = podmanTest.Podman([]string{"run", "--http-proxy=false", ALPINE, "printenv", "http_proxy"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) Expect(session.OutputToString()).To(Equal("")) session = podmanTest.Podman([]string{"run", "--env", "http_proxy=5.6.7.8", ALPINE, "printenv", "http_proxy"}) diff --git a/test/e2e/run_exit_test.go b/test/e2e/run_exit_test.go index e31a1a6e75..b300567ce5 100644 --- a/test/e2e/run_exit_test.go +++ b/test/e2e/run_exit_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run exit", func() { @@ -15,19 +14,19 @@ var _ = Describe("Podman run exit", func() { It("podman run exit define.ExecErrorCodeGeneric", func() { result := podmanTest.Podman([]string{"run", "--foobar", ALPINE, "ls", "$tmp"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(define.ExecErrorCodeGeneric)) + Expect(result).Should(ExitWithError(define.ExecErrorCodeGeneric, "unknown flag: --foobar")) }) It("podman run exit ExecErrorCodeCannotInvoke", func() { result := podmanTest.Podman([]string{"run", ALPINE, "/etc"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(define.ExecErrorCodeCannotInvoke)) + Expect(result).Should(ExitWithError(define.ExecErrorCodeCannotInvoke, "open executable: Operation not permitted: OCI permission denied")) }) It("podman run exit ExecErrorCodeNotFound", func() { result := podmanTest.Podman([]string{"run", ALPINE, "foobar"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(define.ExecErrorCodeNotFound)) + Expect(result).Should(ExitWithError(define.ExecErrorCodeNotFound, "executable file `foobar` not found in $PATH: No such file or directory: OCI runtime attempted to invoke a command that was not found")) }) It("podman run exit 0", func() { @@ -39,12 +38,12 @@ var _ = Describe("Podman run exit", func() { It("podman run exit 50", func() { result := podmanTest.Podman([]string{"run", ALPINE, "sh", "-c", "exit 50"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(50)) + Expect(result).Should(ExitWithError(50, "")) }) It("podman run exit 125", func() { result := podmanTest.Podman([]string{"run", ALPINE, "sh", "-c", fmt.Sprintf("exit %d", define.ExecErrorCodeGeneric)}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(define.ExecErrorCodeGeneric)) + Expect(result).Should(ExitWithError(define.ExecErrorCodeGeneric, "")) }) }) diff --git a/test/e2e/run_memory_test.go b/test/e2e/run_memory_test.go index 
0fe0826683..e535e6bba7 100644 --- a/test/e2e/run_memory_test.go +++ b/test/e2e/run_memory_test.go @@ -82,14 +82,13 @@ var _ = Describe("Podman run memory", func() { // create a container that gets oomkilled session := podmanTest.Podman([]string{"run", "--name", ctrName, "--read-only", "--memory-swap=20m", "--memory=20m", "--oom-score-adj=1000", ALPINE, "sort", "/dev/urandom"}) session.WaitWithDefaultTimeout() - Expect(session).Should(ExitWithError()) + Expect(session).Should(ExitWithError(137, "")) inspect := podmanTest.Podman(([]string{"inspect", "--format", "{{.State.OOMKilled}} {{.State.ExitCode}}", ctrName})) inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) // Check oomkilled and exit code values - Expect(inspect.OutputToString()).Should(ContainSubstring("true")) - Expect(inspect.OutputToString()).Should(ContainSubstring("137")) + Expect(inspect.OutputToString()).Should(Equal("true 137")) }) It("podman run memory test on successfully exited container", func() { @@ -102,7 +101,6 @@ var _ = Describe("Podman run memory", func() { inspect.WaitWithDefaultTimeout() Expect(inspect).Should(ExitCleanly()) // Check oomkilled and exit code values - Expect(inspect.OutputToString()).Should(ContainSubstring("false")) - Expect(inspect.OutputToString()).Should(ContainSubstring("0")) + Expect(inspect.OutputToString()).Should(Equal("false 0")) }) }) diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index b854e76010..e457a2cbd9 100644 --- a/test/e2e/run_networking_test.go +++ b/test/e2e/run_networking_test.go @@ -10,7 +10,7 @@ import ( "syscall" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/containers/podman/v5/test/utils" "github.com/containers/storage/pkg/stringid" . 
"github.com/onsi/ginkgo/v2" @@ -36,7 +36,7 @@ var _ = Describe("Podman run networking", func() { session.WaitWithDefaultTimeout() defer podmanTest.removeNetwork(net) Expect(session).Should(ExitCleanly()) - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(session.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -66,7 +66,7 @@ var _ = Describe("Podman run networking", func() { session = podmanTest.Podman([]string{"exec", "con1", "nslookup", "google.com", aardvarkDNSGateway}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) Expect(session.OutputToString()).To(ContainSubstring(";; connection timed out; no servers could be reached")) }) @@ -83,7 +83,7 @@ var _ = Describe("Podman run networking", func() { session.WaitWithDefaultTimeout() defer podmanTest.removeNetwork(net) Expect(session).Should(ExitCleanly()) - var results []types.Network + var results []entities.NetworkInspectReport err := json.Unmarshal([]byte(session.OutputToString()), &results) Expect(err).ToNot(HaveOccurred()) Expect(results).To(HaveLen(1)) @@ -118,7 +118,7 @@ var _ = Describe("Podman run networking", func() { session = podmanTest.Podman([]string{"exec", "con1", "nslookup", "google.com", aardvarkDNSGateway}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) Expect(session.OutputToString()).To(ContainSubstring(";; connection timed out; no servers could be reached")) }) @@ -144,11 +144,9 @@ var _ = Describe("Podman run networking", func() { session := podmanTest.RunContainerWithNetworkTest("none") session.WaitWithDefaultTimeout() if _, found := os.LookupEnv("http_proxy"); found { - Expect(session).Should(Exit(5)) - Expect(session.ErrorToString()).To(ContainSubstring("Could not resolve proxy:")) + Expect(session).Should(ExitWithError(5, "Could not resolve proxy:")) } else { - Expect(session).Should(Exit(6)) - Expect(session.ErrorToString()).To(ContainSubstring("Could not resolve host: www.redhat.com")) + Expect(session).Should(ExitWithError(6, "Could not resolve host: www.redhat.com")) } }) @@ -209,7 +207,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 80-82 -p 8090:8090", func() { @@ -221,16 +219,16 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(4)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0].HostPort).To(Not(Equal("81"))) - Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) 
Expect(inspectOut[0].NetworkSettings.Ports["82/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["82/tcp"][0].HostPort).To(Not(Equal("82"))) - Expect(inspectOut[0].NetworkSettings.Ports["82/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["82/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8090/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8090/tcp"][0]).To(HaveField("HostPort", "8090")) - Expect(inspectOut[0].NetworkSettings.Ports["8090/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8090/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 80-81 -p 8180-8181", func() { @@ -242,16 +240,16 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(4)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0].HostPort).To(Not(Equal("81"))) - Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["81/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8180/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8180/tcp"][0].HostPort).To(Not(Equal("8180"))) - Expect(inspectOut[0].NetworkSettings.Ports["8180/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8180/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0].HostPort).To(Not(Equal("8181"))) - Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 80 -p 8280-8282:8280-8282", func() { @@ -263,16 +261,16 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(4)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8280/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8280/tcp"][0]).To(HaveField("HostPort", "8280")) - Expect(inspectOut[0].NetworkSettings.Ports["8280/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8280/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8281/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8281/tcp"][0]).To(HaveField("HostPort", "8281")) - Expect(inspectOut[0].NetworkSettings.Ports["8281/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8281/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) Expect(inspectOut[0].NetworkSettings.Ports["8282/tcp"]).To(HaveLen(1)) 
Expect(inspectOut[0].NetworkSettings.Ports["8282/tcp"][0]).To(HaveField("HostPort", "8282")) - Expect(inspectOut[0].NetworkSettings.Ports["8282/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8282/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 8380:80", func() { @@ -284,7 +282,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostPort", "8380")) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 8480:80/TCP", func() { @@ -298,7 +296,7 @@ var _ = Describe("Podman run networking", func() { // "tcp" in lower characters Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostPort", "8480")) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 80/udp", func() { @@ -310,7 +308,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/udp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p 127.0.0.1:8580:80", func() { @@ -370,7 +368,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("0"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run --expose 80/udp -P", func() { @@ -382,7 +380,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/udp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0].HostPort).To(Not(Equal("0"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/udp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run --expose 80 -p 80", func() { @@ -394,7 +392,7 @@ var _ = Describe("Podman run networking", func() { Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Not(Equal("80"))) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run --publish-all with EXPOSE port ranges in Dockerfile", func() { @@ -455,7 +453,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) 
Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0].HostPort).To(Not(Equal("8181"))) - Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["8181/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run -p xxx:8080 -p yyy:8080", func() { @@ -483,7 +481,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) Expect(inspectOut[0].NetworkSettings.Ports).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"]).To(HaveLen(1)) Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostPort", "9280")) - Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "")) + Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0]).To(HaveField("HostIP", "0.0.0.0")) }) It("podman run network expose host port 80 to container port", func() { @@ -498,7 +496,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) Expect(results.OutputToString()).To(ContainSubstring(strconv.Itoa(port2))) ncBusy := SystemExec("nc", []string{"-l", "-p", strconv.Itoa(port1)}) - Expect(ncBusy).To(ExitWithError()) + Expect(ncBusy).To(ExitWithError(2, fmt.Sprintf("Ncat: bind to 0.0.0.0:%d: Address already in use. QUITTING.", port1))) }) It("podman run network expose host port 18081 to container port 8000 using rootlesskit port handler", func() { @@ -509,7 +507,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) Expect(session).Should(ExitCleanly()) ncBusy := SystemExec("nc", []string{"-l", "-p", strconv.Itoa(port2)}) - Expect(ncBusy).To(ExitWithError()) + Expect(ncBusy).To(ExitWithError(2, fmt.Sprintf("Ncat: bind to [::]:%d: Address already in use. QUITTING.", port2))) }) It("podman run slirp4netns verify net.ipv6.conf.default.accept_dad=0", func() { @@ -532,23 +530,15 @@ EXPOSE 2004-2005/tcp`, ALPINE) Expect(session.OutputToString()).To(Equal(sysctlValue)) }) - It("podman run network expose host port 18082 to container port 8000 using slirp4netns port handler", func() { - session := podmanTest.Podman([]string{"run", "--network", "slirp4netns:port_handler=slirp4netns", "-dt", "-p", "18082:8000", ALPINE, "/bin/sh"}) - session.Wait(30) - Expect(session).Should(ExitCleanly()) - ncBusy := SystemExec("nc", []string{"-l", "-p", "18082"}) - Expect(ncBusy).To(ExitWithError()) - }) - It("podman run network expose host port 8080 to container port 8000 using invalid port handler", func() { session := podmanTest.Podman([]string{"run", "--network", "slirp4netns:port_handler=invalid", "-dt", "-p", "8080:8000", ALPINE, "/bin/sh"}) - session.Wait(30) - Expect(session).To(ExitWithError()) + session.WaitWithDefaultTimeout() + Expect(session).To(ExitWithError(126, `unknown port_handler for slirp4netns: "invalid"`)) }) It("podman run slirp4netns network with host loopback", func() { session := podmanTest.Podman([]string{"run", "--cap-add", "net_raw", "--network", "slirp4netns:allow_host_loopback=true", ALPINE, "ping", "-c1", "10.0.2.2"}) - session.Wait(30) + session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) }) @@ -570,8 +560,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) if strings.Contains(slirp4netnsHelp.OutputToString(), "cidr") { Expect(session).Should(ExitCleanly()) } else { - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("cidr not supported")) + Expect(session).To(ExitWithError(125, "cidr not supported")) } }) @@ -593,8 +582,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) } else { session := podmanTest.Podman([]string{"run", "--network", 
networkConfiguration, "-dt", ALPINE, "nc", "-w", "2", "10.0.2.2", port}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("outbound_addr not supported")) + Expect(session).To(ExitWithError(125, "outbound_addr not supported")) } }) @@ -623,8 +611,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) } else { session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, ALPINE, "nc", "-w", "2", "10.0.2.2", port}) session.Wait(30) - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("outbound_addr not supported")) + Expect(session).To(ExitWithError(125, "outbound_addr not supported")) } }) @@ -731,8 +718,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) ctr2 := podmanTest.Podman([]string{"run", "-d", "--net=container:" + ctrName, "--add-host", "host1:127.0.0.1", ALPINE, "true"}) ctr2.WaitWithDefaultTimeout() - Expect(ctr2).Should(ExitWithError()) - Expect(ctr2.ErrorToString()).Should(ContainSubstring("cannot set extra host entries when the container is joined to another containers network namespace: invalid configuration")) + Expect(ctr2).Should(ExitWithError(125, "cannot set extra host entries when the container is joined to another containers network namespace: invalid configuration")) }) It("podman run --net container: copies hosts and resolv", func() { @@ -780,7 +766,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) session = podmanTest.Podman([]string{"run", "--uidmap", "0:100000:1000", "--rm", "--hostname", "foohostname", "-v", "/etc/hosts:/etc/hosts", ALPINE, "grep", "foohostname", "/etc/hosts"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("podman run network in user created network namespace", func() { @@ -980,8 +966,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) It("podman run network in bogus user created network namespace", func() { session := podmanTest.Podman([]string{"run", "-dt", "--net", "ns:/run/netns/xxy", ALPINE, "wget", "www.redhat.com"}) session.Wait(90) - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("stat /run/netns/xxy: no such file or directory")) + Expect(session).To(ExitWithError(125, "faccessat /run/netns/xxy: no such file or directory")) }) It("podman run in custom CNI network with --static-ip", func() { @@ -1126,7 +1111,7 @@ EXPOSE 2004-2005/tcp`, ALPINE) session = podmanTest.Podman([]string{"run", "--name", "con3", "--pod", pod2, CITEST_IMAGE, "nslookup", "con1"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) Expect(session.OutputToString()).To(ContainSubstring("server can't find con1.dns.podman: NXDOMAIN")) session = podmanTest.Podman([]string{"run", "--name", "con4", "--network", net, CITEST_IMAGE, "nslookup", pod2 + ".dns.podman"}) diff --git a/test/e2e/run_ns_test.go b/test/e2e/run_ns_test.go index a9f83c25e8..80f463a941 100644 --- a/test/e2e/run_ns_test.go +++ b/test/e2e/run_ns_test.go @@ -25,7 +25,7 @@ var _ = Describe("Podman run ns", func() { session = podmanTest.Podman([]string{"run", "--pid=badpid", fedoraMinimal, "bash", "-c", "echo $$"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "unrecognized namespace mode badpid passed")) }) It("podman run --cgroup private test", func() { @@ -79,7 +79,7 @@ var _ = Describe("Podman run ns", func() { It("podman run bad ipc pid test", func() { session := podmanTest.Podman([]string{"run", 
"--ipc=badpid", fedoraMinimal, "bash", "-c", "echo $$"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "unrecognized namespace mode badpid passed")) }) It("podman run mounts fresh cgroup", func() { diff --git a/test/e2e/run_passwd_test.go b/test/e2e/run_passwd_test.go index ea10199d95..3ce00b0bdc 100644 --- a/test/e2e/run_passwd_test.go +++ b/test/e2e/run_passwd_test.go @@ -71,7 +71,7 @@ USER 1000`, ALPINE) It("podman run non-numeric group not specified in container", func() { session := podmanTest.Podman([]string{"run", "--read-only", "-u", "root:doesnotexist", BB, "mount"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(126, "unable to find group doesnotexist: no matching entries in group file")) }) It("podman run numeric group specified in container", func() { diff --git a/test/e2e/run_seccomp_test.go b/test/e2e/run_seccomp_test.go index 307acb2e8c..fd28ff7182 100644 --- a/test/e2e/run_seccomp_test.go +++ b/test/e2e/run_seccomp_test.go @@ -1,10 +1,12 @@ package integration import ( + "fmt" + "path/filepath" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run", func() { @@ -25,7 +27,7 @@ var _ = Describe("Podman run", func() { It("podman run --seccomp-policy invalid", func() { session := podmanTest.Podman([]string{"run", "--seccomp-policy", "invalid", alpineSeccomp, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `invalid seccomp policy "invalid": valid policies are ["default" "image"]`)) }) It("podman run --seccomp-policy image (block all syscalls)", func() { @@ -34,12 +36,26 @@ var _ = Describe("Podman run", func() { // TODO: we're getting a "cannot start a container that has // stopped" error which seems surprising. Investigate // why that is so. - Expect(session).To(ExitWithError()) + base := filepath.Base(podmanTest.OCIRuntime) + if base == "runc" { + // TODO: worse than that. With runc, we get two alternating failures: + // 126 + cannot start a container that has stopped + // 127 + failed to connect to container's attach socket ... ENOENT + Expect(session.ExitCode()).To(BeNumerically(">=", 126), "Exit status using runc") + } else if base == "crun" { + expect := fmt.Sprintf("OCI runtime error: %s: read from the init process", podmanTest.OCIRuntime) + if IsRemote() { + expect = fmt.Sprintf("for attach: %s: read from the init process: OCI runtime error", podmanTest.OCIRuntime) + } + Expect(session).To(ExitWithError(126, expect)) + } else { + Skip("Not valid with the current OCI runtime") + } }) It("podman run --seccomp-policy image (bogus profile)", func() { session := podmanTest.Podman([]string{"run", "--seccomp-policy", "image", alpineBogusSeccomp, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "loading seccomp profile failed: decoding seccomp profile failed: invalid character 'B' looking for beginning of value")) }) }) diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go index 66a8374681..50ac5b58ca 100644 --- a/test/e2e/run_selinux_test.go +++ b/test/e2e/run_selinux_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" "github.com/opencontainers/selinux/go-selinux" ) @@ -124,7 +123,7 @@ var _ = Describe("Podman run", func() { session = podmanTest.Podman([]string{"run", "--security-opt", "label=type:spc_t", "--security-opt", "label=filetype:foobar", fedoraMinimal, "ls", "-Z", "/dev"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(126)) + Expect(session).Should(ExitWithError(126, "invalid argument")) }) It("podman exec selinux check", func() { diff --git a/test/e2e/run_signal_test.go b/test/e2e/run_signal_test.go index 45b394429d..8f81186aa8 100644 --- a/test/e2e/run_signal_test.go +++ b/test/e2e/run_signal_test.go @@ -110,7 +110,7 @@ var _ = Describe("Podman run with --sig-proxy", func() { Expect(killSession).Should(ExitCleanly()) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(2, "SIGFPE: floating-point exception")) Expect(session.OutputToString()).To(Not(ContainSubstring("Received"))) }) diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go index b644ccbd6e..9fda9f4a4e 100644 --- a/test/e2e/run_staticip_test.go +++ b/test/e2e/run_staticip_test.go @@ -20,19 +20,19 @@ var _ = Describe("Podman run with --ip flag", func() { It("Podman run --ip with garbage address", func() { result := podmanTest.Podman([]string{"run", "--ip", "114232346", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, `"114232346" is not an ip address`)) }) It("Podman run --ip with v6 address", func() { result := podmanTest.Podman([]string{"run", "--ip", "2001:db8:bad:beef::1", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(126, "requested static ip 2001:db8:bad:beef::1 not in any subnet on network podman")) }) It("Podman run --ip with non-allocatable IP", func() { result := podmanTest.Podman([]string{"run", "--ip", "203.0.113.124", ALPINE, "ls"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(126, "requested static ip 203.0.113.124 not in any subnet on network podman")) }) It("Podman run with specified static IP has correct IP", func() { @@ -82,6 +82,7 @@ var _ = Describe("Podman run with --ip flag", func() { result := podmanTest.Podman([]string{"run", "-d", "--name", "nginx", "--ip", ip, NGINX_IMAGE}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) + cid := result.OutputToString() // This test should not use a proxy client := &http.Client{ @@ -112,7 +113,6 @@ var _ = Describe("Podman run with --ip flag", func() { } result = podmanTest.Podman([]string{"run", "--ip", ip, ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring(" address %s ", ip)) + Expect(result).To(ExitWithError(126, fmt.Sprintf("IPAM error: requested ip address %s is already allocated to container ID %s", ip, cid))) }) }) diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go index b75cc5fa41..95a2ab61e1 100644 --- a/test/e2e/run_test.go +++ b/test/e2e/run_test.go @@ -14,6 +14,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/podman/v5/libpod/define" . "github.com/containers/podman/v5/test/utils" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -115,17 +116,16 @@ var _ = Describe("Podman run", func() { It("podman run --signature-policy", func() { session := podmanTest.Podman([]string{"run", "--pull=always", "--signature-policy", "/no/such/file", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + if IsRemote() { + Expect(session).To(ExitWithError(125, "unknown flag: --signature-policy")) + return + } + Expect(session).To(ExitWithError(125, "open /no/such/file: no such file or directory")) session = podmanTest.Podman([]string{"run", "--pull=always", "--signature-policy", "/etc/containers/policy.json", ALPINE}) session.WaitWithDefaultTimeout() - if IsRemote() { - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("unknown flag")) - } else { - Expect(session).Should(Exit(0)) - Expect(session.ErrorToString()).To(ContainSubstring("Getting image source signatures")) - } + Expect(session).Should(Exit(0)) + Expect(session.ErrorToString()).To(ContainSubstring("Getting image source signatures")) }) It("podman run --rm with --restart", func() { @@ -143,11 +143,11 @@ var _ = Describe("Podman run", func() { session = podmanTest.Podman([]string{"run", "--rm", "--restart", "always", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `the --rm option conflicts with --restart, when the restartPolicy is not "" and "no"`)) session = podmanTest.Podman([]string{"run", "--rm", "--restart", "unless-stopped", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `the --rm option conflicts with --restart, when the restartPolicy is not "" and "no"`)) }) It("podman run a container based on on a short name with localhost", func() { @@ -232,8 +232,7 @@ var _ = Describe("Podman run", func() { run := podmanTest.Podman([]string{"run", pushedImage, "date"}) run.WaitWithDefaultTimeout() - Expect(run).Should(Exit(125)) - Expect(run.ErrorToString()).To(ContainSubstring("pinging container registry localhost:" + port)) + Expect(run).Should(ExitWithError(125, "pinging container registry localhost:"+port)) Expect(run.ErrorToString()).To(ContainSubstring("http: server gave HTTP response to HTTPS client")) run = podmanTest.Podman([]string{"run", "--tls-verify=false", pushedImage, "echo", "got here"}) @@ -393,8 +392,7 @@ var _ = Describe("Podman run", func() { if _, err := os.Stat(mask); err == nil { session = podmanTest.Podman([]string{"exec", "maskCtr", "touch", mask}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(Equal(fmt.Sprintf("touch: %s: Read-only file system", mask))) + Expect(session).Should(ExitWithError(1, fmt.Sprintf("touch: %s: Read-only file system", mask))) } } }) @@ -456,7 +454,9 @@ var _ = Describe("Podman run", func() { }) It("podman run powercap is masked", func() { - Skip("CI VMs do not have access to powercap") + if err := fileutils.Exists("/sys/devices/virtual/powercap"); err != nil { + Skip("/sys/devices/virtual/powercap is not present") + } testCtr1 := "testctr" run := podmanTest.Podman([]string{"run", "-d", "--name", testCtr1, ALPINE, "top"}) @@ -465,7 +465,8 @@ var _ = Describe("Podman run", func() { exec := podmanTest.Podman([]string{"exec", "-ti", testCtr1, "ls", "/sys/devices/virtual/powercap"}) exec.WaitWithDefaultTimeout() - Expect(exec).To(ExitWithError()) + Expect(exec).To(ExitCleanly()) + Expect(exec.OutputToString()).To(BeEmpty(), "ls powercap without 
--privileged") testCtr2 := "testctr2" run2 := podmanTest.Podman([]string{"run", "-d", "--privileged", "--name", testCtr2, ALPINE, "top"}) @@ -475,7 +476,7 @@ var _ = Describe("Podman run", func() { exec2 := podmanTest.Podman([]string{"exec", "-ti", testCtr2, "ls", "/sys/devices/virtual/powercap"}) exec2.WaitWithDefaultTimeout() Expect(exec2).Should(ExitCleanly()) - Expect(exec2.OutputToString()).Should(Not(BeEmpty())) + Expect(exec2.OutputToString()).Should(Not(BeEmpty()), "ls powercap with --privileged") }) It("podman run security-opt unmask on /sys/fs/cgroup", func() { @@ -527,15 +528,13 @@ var _ = Describe("Podman run", func() { cmd = append(secOpts, cmd...) session = podmanTest.Podman(append([]string{"run"}, cmd...)) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("ln: /linkNotAllowed: Operation not permitted")) + Expect(session).To(ExitWithError(1, "ln: /linkNotAllowed: Operation not permitted")) // ...even with --privileged cmd = append([]string{"--privileged"}, cmd...) session = podmanTest.Podman(append([]string{"run"}, cmd...)) session.WaitWithDefaultTimeout() - Expect(session).To(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("ln: /linkNotAllowed: Operation not permitted")) + Expect(session).To(ExitWithError(1, "ln: /linkNotAllowed: Operation not permitted")) }) It("podman run seccomp test --privileged no profile should be unconfined", func() { @@ -718,10 +717,10 @@ USER bin`, BB) Expect(session).Should(ExitCleanly()) } - session = podmanTest.Podman([]string{"run", "--rm", "--oom-score-adj=111", fedoraMinimal, "cat", "/proc/self/oom_score_adj"}) + session = podmanTest.Podman([]string{"run", "--rm", "--oom-score-adj=999", fedoraMinimal, "cat", "/proc/self/oom_score_adj"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(Equal("111")) + Expect(session.OutputToString()).To(Equal("999")) currentOOMScoreAdj, err := os.ReadFile("/proc/self/oom_score_adj") Expect(err).ToNot(HaveOccurred()) @@ -740,6 +739,11 @@ USER bin`, BB) It("podman run limits host test", func() { SkipIfRemote("This can only be used for local tests") + info := GetHostDistributionInfo() + if info.Distribution == "debian" { + // "expected 1048576 to be >= 1073741816" + Skip("FIXME 2024-05-28 fails on debian, maybe because of systemd 256?") + } var l syscall.Rlimit @@ -758,11 +762,19 @@ USER bin`, BB) }) It("podman run with cidfile", func() { - session := podmanTest.Podman([]string{"run", "--cidfile", tempdir + "cidfile", ALPINE, "ls"}) + cidFile := filepath.Join(tempdir, "cidfile") + session := podmanTest.Podman([]string{"run", "--name", "cidtest", "--cidfile", cidFile, CITEST_IMAGE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - err := os.Remove(tempdir + "cidfile") + + cidFromFile, err := os.ReadFile(cidFile) Expect(err).ToNot(HaveOccurred()) + + session = podmanTest.Podman([]string{"inspect", "--format", "{{.Id}}", "cidtest"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + + Expect(string(cidFromFile)).To(Equal(session.OutputToString()), "CID from cidfile == CID from podman inspect") }) It("podman run sysctl test", func() { @@ -775,7 +787,7 @@ USER bin`, BB) // network sysctls should fail if --net=host is set session = podmanTest.Podman([]string{"run", "--net", "host", "--rm", "--sysctl", "net.core.somaxconn=65535", ALPINE, "sysctl", "net.core.somaxconn"}) session.WaitWithDefaultTimeout() - 
Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "sysctl net.core.somaxconn=65535 can't be set since Network Namespace set to host: invalid argument")) }) It("podman run blkio-weight test", func() { @@ -924,7 +936,7 @@ USER bin`, BB) It("podman test hooks", func() { SkipIfRemote("--hooks-dir does not work with remote") - hooksDir := tempdir + "/hooks,withcomma" + hooksDir := filepath.Join(tempdir, "hooks,withcomma") err := os.Mkdir(hooksDir, 0755) Expect(err).ToNot(HaveOccurred()) hookJSONPath := filepath.Join(hooksDir, "checkhooks.json") @@ -977,7 +989,7 @@ echo -n %s >%s err = os.WriteFile(secretsFile, []byte(secretsString), 0755) Expect(err).ToNot(HaveOccurred()) - targetDir := tempdir + "/symlink/target" + targetDir := filepath.Join(tempdir, "symlink/target") err = os.MkdirAll(targetDir, 0755) Expect(err).ToNot(HaveOccurred()) keyFile := filepath.Join(targetDir, "key.pem") @@ -1110,19 +1122,19 @@ echo -n %s >%s It("podman run attach nonsense errors", func() { session := podmanTest.Podman([]string{"run", "--rm", "--attach", "asdfasdf", ALPINE, "ls", "/"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `invalid stream "asdfasdf" for --attach - must be one of stdin, stdout, or stderr: invalid argument`)) }) It("podman run exit code on failure to exec", func() { session := podmanTest.Podman([]string{"run", ALPINE, "/etc"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(126)) + Expect(session).Should(ExitWithError(126, "open executable: Operation not permitted: OCI permission denied")) }) It("podman run error on exec", func() { session := podmanTest.Podman([]string{"run", ALPINE, "sh", "-c", "exit 100"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(100)) + Expect(session).Should(ExitWithError(100, "")) }) It("podman run with named volume", func() { @@ -1206,8 +1218,7 @@ USER mail`, BB) // check that the read-only option works session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro", ALPINE, "touch", mountpoint + "abc.txt"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("Read-only file system")) + Expect(session).Should(ExitWithError(1, "Read-only file system")) // check that both z and ro options work session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro,z", ALPINE, "cat", mountpoint + filename}) @@ -1218,14 +1229,12 @@ USER mail`, BB) // check that multiple ro/rw are not working session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro,rw", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("cannot set ro or rw options more than once")) + Expect(session).Should(ExitWithError(125, "cannot set ro or rw options more than once")) // check that multiple z options are not working session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":z,z,ro", ALPINE, "cat", mountpoint + filename}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("cannot set :z more than once in mount options")) + Expect(session).Should(ExitWithError(125, "cannot set :z more than once in mount options")) // create new read-only volume session = podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":ro", ALPINE, "cat", mountpoint + filename}) @@ -1236,8 
+1245,7 @@ USER mail`, BB) // check if the original volume was mounted as read-only that --volumes-from also mount it as read-only session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "touch", mountpoint + "abc.txt"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) - Expect(session.ErrorToString()).To(ContainSubstring("Read-only file system")) + Expect(session).Should(ExitWithError(1, "Read-only file system")) }) It("podman run --volumes-from flag with built-in volumes", func() { @@ -1300,12 +1308,10 @@ VOLUME %s`, ALPINE, volPath, volPath) session := podmanTest.Podman([]string{"run", "--volume", ":/myvol1:z", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("directory cannot be empty")) + Expect(session).To(ExitWithError(125, "host directory cannot be empty")) session = podmanTest.Podman([]string{"run", "--volume", vol1 + ":", ALPINE, "touch", "/myvol2/foo.txt"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("directory cannot be empty")) + Expect(session).To(ExitWithError(125, "container directory cannot be empty")) }) It("podman run --mount flag with multiple mounts", func() { @@ -1418,7 +1424,7 @@ VOLUME %s`, ALPINE, volPath, volPath) Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "test" found: no such container`)) numContainers := podmanTest.NumberOfContainers() Expect(numContainers).To(Equal(0)) @@ -1427,23 +1433,23 @@ VOLUME %s`, ALPINE, volPath, volPath) It("podman run --rm failed container should delete itself", func() { session := podmanTest.Podman([]string{"run", "--name", "test", "--rm", ALPINE, "foo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(127, "not found in $PATH")) session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no container with name or ID "test" found: no such container`)) numContainers := podmanTest.NumberOfContainers() Expect(numContainers).To(Equal(0)) }) It("podman run failed container should NOT delete itself", func() { - session := podmanTest.Podman([]string{"run", ALPINE, "foo"}) + session := podmanTest.Podman([]string{"run", "--name", "test", ALPINE, "foo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(127, "not found in $PATH")) // If remote we could have a race condition session = podmanTest.Podman([]string{"wait", "test"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitCleanly()) numContainers := podmanTest.NumberOfContainers() Expect(numContainers).To(Equal(1)) @@ -1467,28 +1473,25 @@ VOLUME %s`, ALPINE, volPath, volPath) It("podman run with bad healthcheck retries", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-retries", "0", ALPINE, "top"}) session.Wait() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-retries must be greater than 0")) + Expect(session).To(ExitWithError(125, "healthcheck-retries must be greater than 0")) }) It("podman run with bad 
healthcheck timeout", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-timeout", "0s", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-timeout must be at least 1 second")) + Expect(session).To(ExitWithError(125, "healthcheck-timeout must be at least 1 second")) }) It("podman run with bad healthcheck start-period", func() { session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-start-period", "-1s", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-start-period must be 0 seconds or greater")) + Expect(session).To(ExitWithError(125, "healthcheck-start-period must be 0 seconds or greater")) }) It("podman run with --add-host and --no-hosts fails", func() { session := podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", "--no-hosts", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "--no-hosts and --add-host cannot be set together")) }) It("podman run with restart-policy always restarts containers", func() { @@ -1664,13 +1667,13 @@ VOLUME %s`, ALPINE, volPath, volPath) It("podman run with cgroups=garbage errors", func() { session := podmanTest.Podman([]string{"run", "-d", "--cgroups=garbage", ALPINE, "top"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `running container create option: invalid cgroup mode "garbage": invalid argument`)) }) It("podman run should fail with nonexistent authfile", func() { session := podmanTest.Podman([]string{"run", "--authfile", "/tmp/nonexistent", ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) }) It("podman run --device-cgroup-rule", func() { @@ -1684,12 +1687,24 @@ VOLUME %s`, ALPINE, volPath, volPath) Expect(session).Should(ExitCleanly()) }) + It("podman run --device and --privileged", func() { + session := podmanTest.Podman([]string{"run", "--device", "/dev/null:/dev/testdevice", "--privileged", ALPINE, "ls", "/dev"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).To(ContainSubstring(" testdevice "), "our custom device") + // assumes that /dev/mem always exists + Expect(session.OutputToString()).To(ContainSubstring(" mem "), "privileged device") + + session = podmanTest.Podman([]string{"run", "--device", "invalid-device", "--privileged", ALPINE, "ls", "/dev"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitWithError(125, "stat invalid-device: no such file or directory")) + }) + It("podman run --replace", func() { // Make sure we error out with --name. session := podmanTest.Podman([]string{"create", "--replace", ALPINE, "/bin/sh"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("cannot replace container without --name being set")) + Expect(session).Should(ExitWithError(125, "cannot replace container without --name being set")) // Run and replace 5 times in a row the "same" container. 
ctrName := "testCtr" @@ -1719,8 +1734,7 @@ VOLUME %s`, ALPINE, volPath, volPath) It("podman run --preserve-fds invalid fd", func() { session := podmanTest.Podman([]string{"run", "--preserve-fds", "2", ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("file descriptor 3 is not available")) + Expect(session).To(ExitWithError(125, "file descriptor 3 is not available - the preserve-fds option requires that file descriptors must be passed")) }) It("podman run --privileged and --group-add", func() { @@ -1748,9 +1762,7 @@ VOLUME %s`, ALPINE, volPath, volPath) badTZFile := fmt.Sprintf("../../../%s", tzFile) session := podmanTest.Podman([]string{"run", "--tz", badTZFile, "--rm", ALPINE, "date"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To( - Equal("Error: running container create option: finding timezone: time: invalid location name")) + Expect(session).To(ExitWithError(125, "running container create option: finding timezone: time: invalid location name")) session = podmanTest.Podman([]string{"run", "--tz", "Pacific/Honolulu", "--rm", ALPINE, "date", "+'%H %Z'"}) session.WaitWithDefaultTimeout() @@ -1804,8 +1816,7 @@ VOLUME %s`, ALPINE, volPath, volPath) session = podmanTest.Podman([]string{"run", "--umask", "9999", "--rm", ALPINE, "sh", "-c", "umask"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("invalid umask")) + Expect(session).To(ExitWithError(125, "invalid umask string 9999: invalid argument")) }) It("podman run makes workdir from image", func() { @@ -1845,7 +1856,7 @@ WORKDIR /madethis`, BB) It("podman run a container with --pull never should fail if no local store", func() { session := podmanTest.Podman([]string{"run", "--pull", "never", "docker.io/library/debian:latest", "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "Error: docker.io/library/debian:latest: image not known")) }) It("podman run container with --pull missing and only pull once", func() { @@ -2055,27 +2066,27 @@ WORKDIR /madethis`, BB) // Invalid type session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=other", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "type other is invalid: parsing secret")) // Invalid option session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,invalid=invalid", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "option invalid=invalid invalid: parsing secret")) // Option syntax not valid session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "option type must be in form option=value: parsing secret")) // mount option with env type session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=env,uid=1000", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "UID, GID, Mode options cannot be set with secret type env: parsing 
secret")) // No source given session = podmanTest.Podman([]string{"run", "--secret", "type=env", "--name", "secr", ALPINE, "printenv", "mysecret"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `no secret with name or id "type=env": no such secret`)) }) It("podman run --requires", func() { @@ -2103,7 +2114,7 @@ WORKDIR /madethis`, BB) It("podman run with pidfile", func() { SkipIfRemote("pidfile not handled by remote") - pidfile := tempdir + "pidfile" + pidfile := filepath.Join(tempdir, "pidfile") session := podmanTest.Podman([]string{"run", "--pidfile", pidfile, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -2147,9 +2158,9 @@ WORKDIR /madethis`, BB) podmanTest.AddImageToRWStore(ALPINE) - lock := GetPortLock("5000") + lock := GetPortLock("5006") defer lock.Unlock() - session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) + session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5006:5000", REGISTRY_IMAGE, "/entrypoint.sh", "/etc/docker/registry/config.yml"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -2162,7 +2173,7 @@ WORKDIR /madethis`, BB) publicKeyFileName, privateKeyFileName, err := WriteRSAKeyPair(keyFileName, bitSize) Expect(err).ToNot(HaveOccurred()) - imgPath := "localhost:5000/my-alpine" + imgPath := "localhost:5006/my-alpine" session = podmanTest.Podman([]string{"push", "--encryption-key", "jwe:" + publicKeyFileName, "--tls-verify=false", "--remove-signatures", ALPINE, imgPath}) session.WaitWithDefaultTimeout() @@ -2171,21 +2182,25 @@ WORKDIR /madethis`, BB) Expect(session).Should(ExitCleanly()) // Must fail without --decryption-key - // NOTE: --tls-verify=false not needed, because localhost:5000 is in registries.conf - session = podmanTest.Podman([]string{"run", imgPath}) + session = podmanTest.Podman([]string{"run", "--tls-verify=false", imgPath}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull " + imgPath)) + Expect(session).Should(ExitWithError(125, "Trying to pull "+imgPath)) Expect(session.ErrorToString()).To(ContainSubstring("invalid tar header")) // With - session = podmanTest.Podman([]string{"run", "--decryption-key", privateKeyFileName, imgPath}) + session = podmanTest.Podman([]string{"run", "--tls-verify=false", "--decryption-key", privateKeyFileName, imgPath}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) Expect(session.ErrorToString()).To(ContainSubstring("Trying to pull " + imgPath)) }) It("podman run --shm-size-systemd", func() { + // FIXME Failed to set RLIMIT_CORE: Operation not permitted + info := GetHostDistributionInfo() + if info.Distribution == "debian" { + Skip("FIXME 2024-05-28 fails on debian, maybe because of systemd 256?") + } + ctrName := "testShmSizeSystemd" run := podmanTest.Podman([]string{"run", "--name", ctrName, "--shm-size-systemd", "10mb", "-d", SYSTEMD_IMAGE, "/sbin/init"}) run.WaitWithDefaultTimeout() diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go index 311a6f1c41..ffb0ef11d8 100644 --- a/test/e2e/run_userns_test.go +++ b/test/e2e/run_userns_test.go @@ -12,7 +12,6 @@ import ( "github.com/containers/storage" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) func createContainersConfFileWithCustomUserns(pTest *PodmanTestIntegration, userns string) { @@ -371,13 +370,11 @@ var _ = Describe("Podman UserNS support", func() { It("podman --userns= conflicts with ui[dg]map and sub[ug]idname", func() { session := podmanTest.Podman([]string{"run", "--userns=host", "--uidmap=0:1:500", "alpine", "true"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) + Expect(session).Should(ExitWithError(125, "--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) session = podmanTest.Podman([]string{"run", "--userns=host", "--gidmap=0:200:5000", "alpine", "true"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) + Expect(session).Should(ExitWithError(125, "--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) // with sub[ug]idname we don't check for the error output since the error message could be different, depending on the // system configuration since the specified user could not be defined and cause a different earlier error. @@ -422,4 +419,41 @@ var _ = Describe("Podman UserNS support", func() { podmanTest.RestartRemoteService() } }) + + It("podman pod userns inherited for containers", func() { + podName := "testPod" + podIDFile := filepath.Join(podmanTest.TempDir, "podid") + podCreate := podmanTest.Podman([]string{"pod", "create", "--pod-id-file", podIDFile, "--uidmap", "0:0:1000", "--name", podName}) + podCreate.WaitWithDefaultTimeout() + Expect(podCreate).Should(ExitCleanly()) + + // The containers should not use PODMAN_USERNS as they must inherited the userns from the pod. 
+ os.Setenv("PODMAN_USERNS", "keep-id") + defer os.Unsetenv("PODMAN_USERNS") + + expectedMapping := ` 0 0 1000 + 0 0 1000 +` + // rootless mapping is split in two ranges + if isRootless() { + expectedMapping = ` 0 0 1 + 1 1 999 + 0 0 1 + 1 1 999 +` + } + + session := podmanTest.Podman([]string{"run", "--pod", podName, ALPINE, "cat", "/proc/self/uid_map", "/proc/self/gid_map"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + output := string(session.Out.Contents()) + Expect(output).To(Equal(expectedMapping)) + + // https://github.com/containers/podman/issues/22931 + session = podmanTest.Podman([]string{"run", "--pod-id-file", podIDFile, ALPINE, "cat", "/proc/self/uid_map", "/proc/self/gid_map"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + output = string(session.Out.Contents()) + Expect(output).To(Equal(expectedMapping)) + }) }) diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go index 4e777d62ef..f8dc22b1ed 100644 --- a/test/e2e/run_volume_test.go +++ b/test/e2e/run_volume_test.go @@ -30,6 +30,14 @@ var _ = Describe("Podman run with volumes", func() { return strings.Fields(session.OutputToString()) } + //nolint:unparam + mountVolumeAndCheckDirectory := func(volName, volPath, expectedOwner, imgName string) { + check := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:%s", volName, volPath), imgName, "stat", "-c", "%U:%G", volPath}) + check.WaitWithDefaultTimeout() + Expect(check).Should(ExitCleanly()) + Expect(check.OutputToString()).Should(ContainSubstring(fmt.Sprintf("%s:%s", expectedOwner, expectedOwner))) + } + It("podman run with volume flag", func() { mountPath := filepath.Join(podmanTest.TempDir, "secrets") err = os.Mkdir(mountPath, 0755) @@ -97,7 +105,7 @@ var _ = Describe("Podman run with volumes", func() { session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup,notmpcopyup", ALPINE, "ls", "/etc/ssl"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "cannot pass 'tmpcopyup' and 'notmpcopyup' mnt.Options more than once: must provide an argument for option")) // test csv escaping session = podmanTest.Podman([]string{"run", "--rm", "--mount=type=tmpfs,tmpfs-size=512M,\"destination=/test,\"", ALPINE, "ls", "/test,"}) @@ -106,11 +114,11 @@ var _ = Describe("Podman run with volumes", func() { session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,tmpcopyup", ALPINE, "true"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `"tmpcopyup" option not supported for "bind" mount types`)) session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,notmpcopyup", ALPINE, "true"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `"notmpcopyup" option not supported for "bind" mount types`)) session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,notmpcopyup", ALPINE, "ls", "/etc/ssl"}) session.WaitWithDefaultTimeout() @@ -124,7 +132,7 @@ var _ = Describe("Podman run with volumes", func() { Expect(err).ToNot(HaveOccurred()) session := podmanTest.Podman([]string{"run", "-v", mountPath + ":" + dest, "-v", "/tmp" + ":" + dest, ALPINE, "ls"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, fmt.Sprintf("%s: duplicate 
mount destination", dest))) }) It("podman run with conflict between image volume and user mount succeeds", func() { @@ -161,7 +169,7 @@ var _ = Describe("Podman run with volumes", func() { session = podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",ro=true,rw=false", ALPINE, "grep", dest, "/proc/self/mountinfo"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "cannot pass 'readonly', 'ro', or 'rw' mnt.Options more than once: must provide an argument for option")) }) It("podman run with volume flag and multiple named volumes", func() { @@ -173,7 +181,20 @@ var _ = Describe("Podman run with volumes", func() { }) It("podman run with volumes and suid/dev/exec options", func() { - SkipIfRemote("podman-remote does not support --volumes") + if isRootless() { + // We cannot undo nosuid,nodev,noexec when running rootless for obvious reasons. + // Thus we should check first if our source dir contains such options and skip the test in this case + session := SystemExec("findmnt", []string{"-n", "-o", "OPTIONS", "--target", podmanTest.TempDir}) + session.WaitWithDefaultTimeout() + Expect(session).To(ExitCleanly()) + output := session.OutputToString() + if strings.Contains(output, "noexec") || + strings.Contains(output, "nodev") || + strings.Contains(output, "nosuid") { + Skip("test file system is mounted noexec, nodev or nosuid - cannot bind mount without these options as rootless") + } + } + mountPath := filepath.Join(podmanTest.TempDir, "secrets") err := os.Mkdir(mountPath, 0755) Expect(err).ToNot(HaveOccurred()) @@ -201,11 +222,6 @@ var _ = Describe("Podman run with volumes", func() { if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } - if isRootless() { - if _, err := exec.LookPath("fuse-overlayfs"); err != nil { - Skip("Fuse-Overlayfs required for rootless overlay mount test") - } - } mountPath := filepath.Join(podmanTest.TempDir, "secrets") err := os.Mkdir(mountPath, 0755) Expect(err).ToNot(HaveOccurred()) @@ -221,11 +237,6 @@ var _ = Describe("Podman run with volumes", func() { if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } - if isRootless() { - if _, err := exec.LookPath("fuse-overlayfs"); err != nil { - Skip("Fuse-Overlayfs required for rootless overlay mount test") - } - } session := podmanTest.Podman([]string{"volume", "create", "myvolume"}) session.WaitWithDefaultTimeout() volName := session.OutputToString() @@ -254,11 +265,6 @@ var _ = Describe("Podman run with volumes", func() { if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } - if isRootless() { - if _, err := exec.LookPath("fuse-overlayfs"); err != nil { - Skip("Fuse-Overlayfs required for rootless overlay mount test") - } - } // create persistent upperdir on host upperDir := filepath.Join(tempdir, "upper") @@ -308,11 +314,6 @@ var _ = Describe("Podman run with volumes", func() { if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } - if isRootless() { - if _, err := exec.LookPath("fuse-overlayfs"); err != nil { - Skip("Fuse-Overlayfs required for rootless overlay mount test") - } - } // Use bindsource instead of named volume bindSource := filepath.Join(tempdir, "bindsource") @@ -351,7 +352,7 @@ var _ = Describe("Podman run with volumes", func() { It("podman run with noexec can't exec", func() { session := podmanTest.Podman([]string{"run", "--rm", "-v", 
"/bin:/hostbin:noexec", ALPINE, "/hostbin/ls", "/"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(126, "ermission denied")) }) It("podman run with tmpfs named volume mounts and unmounts", func() { @@ -429,7 +430,7 @@ var _ = Describe("Podman run with volumes", func() { noCopySession := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol4:/etc/apk:nocopy", ALPINE, "stat", "-c", "%h", "/etc/apk/arch"}) noCopySession.WaitWithDefaultTimeout() - Expect(noCopySession).Should(Exit(1)) + Expect(noCopySession).Should(ExitWithError(1, "stat: can't stat '/etc/apk/arch': No such file or directory")) }) It("podman named volume copyup symlink", func() { @@ -576,7 +577,7 @@ RUN sh -c "cd /etc/apk && ln -s ../../testfile"`, ALPINE) volMount := podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:/tmp", volName), ALPINE, "ls"}) volMount.WaitWithDefaultTimeout() - Expect(volMount).To(ExitWithError()) + Expect(volMount).To(ExitWithError(126, "mounting volume testVol for container ")) }) It("Podman fix for CVE-2020-1726", func() { @@ -626,11 +627,6 @@ VOLUME /test/`, ALPINE) if os.Getenv("container") != "" { Skip("Overlay mounts not supported when running in a container") } - if isRootless() { - if _, err := exec.LookPath("fuse-overlayfs"); err != nil { - Skip("Fuse-Overlayfs required for rootless overlay mount test") - } - } mountPath := filepath.Join(podmanTest.TempDir, "secrets") err := os.Mkdir(mountPath, 0755) Expect(err).ToNot(HaveOccurred()) @@ -679,7 +675,7 @@ VOLUME /test/`, ALPINE) Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"exec", "-l", "ls", "/run/test/container"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(1, "ls: /run/test/container: No such file or directory")) }) It("overlay volume conflicts with named volume and mounts", func() { @@ -700,18 +696,20 @@ VOLUME /test/`, ALPINE) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + expectErr := fmt.Sprintf("%s: duplicate mount destination", mountDest) + // overlay and named volume destinations conflict session = podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:%s:O", mountPath, mountDest), "-v", fmt.Sprintf("%s:%s", volName, mountDest), ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, expectErr)) // overlay and bind mount destinations conflict session = podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:%s:O", mountPath, mountDest), "--mount", fmt.Sprintf("type=bind,src=%s,target=%s", mountSrc, mountDest), ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, expectErr)) // overlay and tmpfs mount destinations conflict session = podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:%s:O", mountPath, mountDest), "--mount", fmt.Sprintf("type=tmpfs,target=%s", mountDest), ALPINE}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, expectErr)) }) It("same volume in multiple places does not deadlock", func() { @@ -740,15 +738,6 @@ VOLUME /test/`, ALPINE) Skip("cannot find mappings for the current user") } - if os.Getenv("container") != "" { - Skip("Overlay mounts not supported when running in a container") - } - if isRootless() { - if _, err := exec.LookPath("fuse_overlay"); err != nil { - Skip("Fuse-Overlayfs required for rootless 
overlay mount test") - } - } - mountPath := filepath.Join(podmanTest.TempDir, "secrets") err = os.Mkdir(mountPath, 0755) Expect(err).ToNot(HaveOccurred()) @@ -759,6 +748,12 @@ VOLUME /test/`, ALPINE) Expect(session).Should(ExitCleanly()) Expect(session.OutputToString()).To(ContainSubstring("888:888")) + // test with an existing directory in the image + session = podmanTest.Podman([]string{"run", "--rm", "--user", "881:882", "-v", "NAMED-VOLUME:/mnt:U", ALPINE, "stat", "-c", "%u:%g", "/mnt"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).To(ContainSubstring("881:882")) + session = podmanTest.Podman([]string{"run", "--rm", "--user", "888:888", "--userns", "auto", "-v", vol, ALPINE, "stat", "-c", "%u:%g", dest}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) @@ -803,7 +798,7 @@ VOLUME /test/`, ALPINE) vol = "type=bind,src=" + mountPath + ",dst=" + dest + ",U=invalid" session = podmanTest.Podman([]string{"run", "--rm", "--user", "888:888", "--mount", vol, ALPINE, "stat", "-c", "%u:%g", dest}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `'U' or 'chown' must be set to true or false, instead received "invalid": must provide an argument for option`)) // true bind mount vol = "type=bind,src=" + mountPath + ",dst=" + dest + ",U=true" @@ -962,4 +957,110 @@ USER testuser`, CITEST_IMAGE) Expect(run).Should(ExitCleanly()) Expect(run.OutputToString()).Should(ContainSubstring(strings.TrimLeft("/vol/", f.Name()))) }) + + It("podman run --mount type=image with subpath", func() { + podmanTest.AddImageToRWStore(ALPINE) + + pathToCheck := "/sbin" + pathInCtr := "/mnt" + + ctrCommand := []string{"run", "--mount", fmt.Sprintf("type=image,source=%s,dst=%s,subpath=%s", ALPINE, pathInCtr, pathToCheck), ALPINE, "ls"} + + run1Cmd := append(ctrCommand, pathToCheck) + run1 := podmanTest.Podman(run1Cmd) + run1.WaitWithDefaultTimeout() + Expect(run1).Should(ExitCleanly()) + + run2Cmd := append(ctrCommand, pathInCtr) + run2 := podmanTest.Podman(run2Cmd) + run2.WaitWithDefaultTimeout() + Expect(run2).Should(ExitCleanly()) + + Expect(run1.OutputToString()).Should(Equal(run2.OutputToString())) + }) + + It("podman run -v chowns multiple times on empty volume", func() { + imgName := "testimg" + dockerfile := fmt.Sprintf(`FROM %s +RUN addgroup -g 1234 test1 +RUN addgroup -g 4567 test2 +RUN addgroup -g 7890 test3 +RUN adduser -D -u 1234 -G test1 test1 +RUN adduser -D -u 4567 -G test2 test2 +RUN adduser -D -u 7890 -G test3 test3 +RUN mkdir /test1 /test2 /test3 /test4 +RUN chown test1:test1 /test1 +RUN chown test2:test2 /test2 +RUN chown test3:test3 /test4 +RUN chmod 755 /test1 /test2 /test3 /test4`, ALPINE) + podmanTest.BuildImage(dockerfile, imgName, "false") + + volName := "testVol" + volCreate := podmanTest.Podman([]string{"volume", "create", volName}) + volCreate.WaitWithDefaultTimeout() + Expect(volCreate).Should(ExitCleanly()) + + mountVolumeAndCheckDirectory(volName, "/test1", "test1", imgName) + mountVolumeAndCheckDirectory(volName, "/test2", "test2", imgName) + mountVolumeAndCheckDirectory(volName, "/test3", "root", imgName) + mountVolumeAndCheckDirectory(volName, "/test4", "root", imgName) + }) + + It("podman run -v chowns until copy-up on volume", func() { + imgName := "testimg" + dockerfile := fmt.Sprintf(`FROM %s +RUN addgroup -g 1234 test1 +RUN addgroup -g 4567 test2 +RUN addgroup -g 7890 test3 +RUN adduser -D -u 1234 -G test1 test1 +RUN adduser -D -u 4567 -G 
test2 test2 +RUN adduser -D -u 7890 -G test3 test3 +RUN mkdir /test1 /test2 /test3 +RUN touch /test2/file1 +RUN chown test1:test1 /test1 +RUN chown -R test2:test2 /test2 +RUN chown test3:test3 /test3 +RUN chmod 755 /test1 /test2 /test3`, ALPINE) + podmanTest.BuildImage(dockerfile, imgName, "false") + + volName := "testVol" + volCreate := podmanTest.Podman([]string{"volume", "create", volName}) + volCreate.WaitWithDefaultTimeout() + Expect(volCreate).Should(ExitCleanly()) + + mountVolumeAndCheckDirectory(volName, "/test1", "test1", imgName) + mountVolumeAndCheckDirectory(volName, "/test2", "test2", imgName) + mountVolumeAndCheckDirectory(volName, "/test3", "test2", imgName) + }) + + It("podman run -v chowns until volume has contents", func() { + imgName := "testimg" + dockerfile := fmt.Sprintf(`FROM %s +RUN addgroup -g 1234 test1 +RUN addgroup -g 4567 test2 +RUN addgroup -g 7890 test3 +RUN adduser -D -u 1234 -G test1 test1 +RUN adduser -D -u 4567 -G test2 test2 +RUN adduser -D -u 7890 -G test3 test3 +RUN mkdir /test1 /test2 /test3 +RUN chown test1:test1 /test1 +RUN chown test2:test2 /test2 +RUN chown test3:test3 /test3 +RUN chmod 755 /test1 /test2 /test3`, ALPINE) + podmanTest.BuildImage(dockerfile, imgName, "false") + + volName := "testVol" + volCreate := podmanTest.Podman([]string{"volume", "create", volName}) + volCreate.WaitWithDefaultTimeout() + Expect(volCreate).Should(ExitCleanly()) + + mountVolumeAndCheckDirectory(volName, "/test1", "test1", imgName) + mountVolumeAndCheckDirectory(volName, "/test2", "test2", imgName) + + session := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:/test2", volName), imgName, "touch", "/test2/file1"}) + session.WaitWithDefaultTimeout() + Expect(session).To(ExitCleanly()) + + mountVolumeAndCheckDirectory(volName, "/test3", "test2", imgName) + }) }) diff --git a/test/e2e/run_working_dir_test.go b/test/e2e/run_working_dir_test.go index 9f06df9313..bc5deaff64 100644 --- a/test/e2e/run_working_dir_test.go +++ b/test/e2e/run_working_dir_test.go @@ -8,7 +8,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run", func() { @@ -23,7 +22,7 @@ var _ = Describe("Podman run", func() { It("podman run a container using non existing --workdir", func() { session := podmanTest.Podman([]string{"run", "--workdir", "/home/foobar", ALPINE, "pwd"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(126)) + Expect(session).Should(ExitWithError(126, `workdir "/home/foobar" does not exist on container `)) }) It("podman run a container using a --workdir under a bind mount", func() { diff --git a/test/e2e/runlabel_test.go b/test/e2e/runlabel_test.go index 259674d60b..db1a6d0929 100644 --- a/test/e2e/runlabel_test.go +++ b/test/e2e/runlabel_test.go @@ -64,17 +64,20 @@ var _ = Describe("podman container runlabel", func() { result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) }) + It("podman container runlabel bogus label should result in non-zero exit code", func() { result := podmanTest.Podman([]string{"container", "runlabel", "RUN", ALPINE}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, fmt.Sprintf("cannot find the value of label: RUN in image: %s", ALPINE))) // should not panic when label missing the value or don't have the label Expect(result.OutputToString()).To(Not(ContainSubstring("panic"))) }) + It("podman container runlabel bogus label in remote image should result in non-zero exit", func() { - result := podmanTest.Podman([]string{"container", "runlabel", "RUN", "docker.io/library/ubuntu:latest"}) + remoteImage := "quay.io/libpod/testimage:00000000" + result := podmanTest.Podman([]string{"container", "runlabel", "RUN", remoteImage}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, fmt.Sprintf("cannot find the value of label: RUN in image: %s", remoteImage))) // should not panic when label missing the value or don't have the label Expect(result.OutputToString()).To(Not(ContainSubstring("panic"))) }) @@ -86,7 +89,7 @@ var _ = Describe("podman container runlabel", func() { // runlabel should fail with nonexistent authfile result := podmanTest.Podman([]string{"container", "runlabel", "--authfile", "/tmp/nonexistent", "RUN", image}) result.WaitWithDefaultTimeout() - Expect(result).To(ExitWithError()) + Expect(result).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) result = podmanTest.Podman([]string{"rmi", image}) result.WaitWithDefaultTimeout() diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go index 25cc1ab4e8..7e7cdcb406 100644 --- a/test/e2e/save_test.go +++ b/test/e2e/save_test.go @@ -1,6 +1,7 @@ package integration import ( + "fmt" "os" "os/exec" "path/filepath" @@ -61,7 +62,7 @@ var _ = Describe("Podman save", func() { save := podmanTest.Podman([]string{"save", "-q", "-o", outfile, "FOOBAR"}) save.WaitWithDefaultTimeout() - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, "repository name must be lowercase")) }) It("podman save to directory with oci format", func() { @@ -101,13 +102,11 @@ var _ = Describe("Podman save", func() { save := podmanTest.Podman([]string{"save", "-q", "--compress", "--format", "docker-archive", "-o", outdir, ALPINE}) save.WaitWithDefaultTimeout() - // should not be 0 - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, "--compress can only be set when --format is 'docker-dir'")) save = podmanTest.Podman([]string{"save", "-q", "--compress", "--format", 
"oci-archive", "-o", outdir, ALPINE}) save.WaitWithDefaultTimeout() - // should not be 0 - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, "--compress can only be set when --format is 'docker-dir'")) }) @@ -116,7 +115,7 @@ var _ = Describe("Podman save", func() { save := podmanTest.Podman([]string{"save", "-q", "--compress", "--format", "docker-dir", "-o", outdir, ALPINE}) save.WaitWithDefaultTimeout() - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, fmt.Sprintf(`invalid filename (should not contain ':') "%s"`, outdir))) }) It("podman save remove signature", func() { @@ -133,11 +132,11 @@ var _ = Describe("Podman save", func() { Expect(err).ToNot(HaveOccurred()) defer os.Setenv("GNUPGHOME", origGNUPGHOME) - port := 5000 + port := 5005 portlock := GetPortLock(strconv.Itoa(port)) defer portlock.Unlock() - session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", strings.Join([]string{strconv.Itoa(port), strconv.Itoa(port)}, ":"), REGISTRY_IMAGE}) + session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", strings.Join([]string{strconv.Itoa(port), "5000"}, ":"), REGISTRY_IMAGE}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) { @@ -173,31 +172,32 @@ default-docker: ` Expect(os.WriteFile("/etc/containers/registries.d/default.yaml", []byte(sigstore), 0755)).To(Succeed()) - session = podmanTest.Podman([]string{"tag", ALPINE, "localhost:5000/alpine"}) + pushedImage := fmt.Sprintf("localhost:%d/alpine", port) + session = podmanTest.Podman([]string{"tag", ALPINE, pushedImage}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--sign-by", "foo@bar.com", "localhost:5000/alpine"}) + session = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--sign-by", "foo@bar.com", pushedImage}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"rmi", ALPINE, "localhost:5000/alpine"}) + session = podmanTest.Podman([]string{"rmi", ALPINE, pushedImage}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) if !IsRemote() { // Generate a signature verification policy file - policyPath := generatePolicyFile(podmanTest.TempDir) + policyPath := generatePolicyFile(podmanTest.TempDir, port) defer os.Remove(policyPath) - session = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/alpine"}) + session = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, pushedImage}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) outfile := filepath.Join(podmanTest.TempDir, "temp.tar") - save := podmanTest.Podman([]string{"save", "-q", "remove-signatures=true", "-o", outfile, "localhost:5000/alpine"}) + save := podmanTest.Podman([]string{"save", "-q", "remove-signatures=true", "-o", outfile, pushedImage}) save.WaitWithDefaultTimeout() - Expect(save).To(ExitWithError()) + Expect(save).To(ExitWithError(125, "invalid reference format")) } }) diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go index df6a9f0d88..16bbe87a52 100644 --- a/test/e2e/search_test.go +++ b/test/e2e/search_test.go @@ -12,7 +12,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) type endpoint struct { @@ -75,19 +74,20 @@ registries = []` }) It("podman search format flag", func() { - search := podmanTest.Podman([]string{"search", "--format", "table {{.Index}} {{.Name}}", "alpine"}) + search := podmanTest.Podman([]string{"search", "--format", "table {{.Index}} {{.Name}}", "testdigest_v2s2"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(len(search.OutputToStringArray())).To(BeNumerically(">", 1)) - Expect(search.OutputToString()).To(ContainSubstring("docker.io/library/alpine")) + Expect(search.OutputToString()).To(ContainSubstring("quay.io/libpod/testdigest_v2s2")) }) It("podman search format json", func() { - search := podmanTest.Podman([]string{"search", "--format", "json", "busybox"}) + search := podmanTest.Podman([]string{"search", "--format", "json", "testdigest_v2s1"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(search.OutputToString()).To(BeValidJSON()) - Expect(search.OutputToString()).To(ContainSubstring("docker.io/library/busybox")) + Expect(search.OutputToString()).To(ContainSubstring("quay.io/libpod/testdigest_v2s1")) + Expect(search.OutputToString()).To(ContainSubstring("Test image used by buildah regression tests")) // Test for https://github.com/containers/podman/issues/11894 contents := make([]entities.ImageSearchReport, 0) @@ -123,17 +123,17 @@ registries = []` }) It("podman search limit flag", func() { - search := podmanTest.Podman([]string{"search", "docker.io/alpine"}) + search := podmanTest.Podman([]string{"search", "quay.io/alpine"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(len(search.OutputToStringArray())).To(BeNumerically(">", 10)) - search = podmanTest.Podman([]string{"search", "--limit", "3", "docker.io/alpine"}) + search = podmanTest.Podman([]string{"search", "--limit", "3", "quay.io/alpine"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(search.OutputToStringArray()).To(HaveLen(4)) - search = podmanTest.Podman([]string{"search", "--limit", "30", "docker.io/alpine"}) + search = podmanTest.Podman([]string{"search", "--limit", "30", "quay.io/alpine"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(search.OutputToStringArray()).To(HaveLen(31)) @@ -313,9 +313,8 @@ registries = []` search := podmanTest.Podman([]string{"search", image, "--tls-verify=true"}) search.WaitWithDefaultTimeout() - Expect(search).Should(Exit(125)) + Expect(search).Should(ExitWithError(125, fmt.Sprintf(`couldn't search registry "localhost:%d": pinging container registry localhost:%d: Get "https://localhost:%d/v2/": http: server gave HTTP response to HTTPS client`, port, port, port))) Expect(search.OutputToString()).Should(BeEmpty()) - Expect(search.ErrorToString()).To(ContainSubstring("http: server gave HTTP response to HTTPS client")) // cleanup resetRegistriesConfigEnv() @@ -358,9 +357,8 @@ registries = []` search := podmanTest.Podman([]string{"search", image}) search.WaitWithDefaultTimeout() - Expect(search).Should(Exit(125)) + Expect(search).Should(ExitWithError(125, fmt.Sprintf(`couldn't search registry "localhost:%d": pinging container registry localhost:%d: Get "https://localhost:%d/v2/": http: server gave HTTP response to HTTPS client`, port, port, port))) Expect(search.OutputToString()).Should(BeEmpty()) - Expect(search.ErrorToString()).To(ContainSubstring("http: server gave HTTP response to HTTPS client")) // cleanup resetRegistriesConfigEnv() @@ -370,7 +368,7 @@ registries 
= []` It("podman search fail with nonexistent --authfile", func() { search := podmanTest.Podman([]string{"search", "--authfile", "/tmp/nonexistent", ALPINE}) search.WaitWithDefaultTimeout() - Expect(search).To(ExitWithError()) + Expect(search).To(ExitWithError(125, "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory")) }) // Registry is unreliable (#18484), this is another super-common flake @@ -382,22 +380,30 @@ registries = []` }) It("podman search repository tags", func() { - search := podmanTest.Podman([]string{"search", "--list-tags", "--limit", "30", "docker.io/library/alpine"}) + search := podmanTest.Podman([]string{"search", "--list-tags", "--limit", "30", "quay.io/podman/stable"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(search.OutputToStringArray()).To(HaveLen(31)) - search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/alpine"}) + search = podmanTest.Podman([]string{"search", "--list-tags", "quay.io/podman/stable"}) search.WaitWithDefaultTimeout() Expect(search).Should(ExitCleanly()) Expect(len(search.OutputToStringArray())).To(BeNumerically(">", 2)) - search = podmanTest.Podman([]string{"search", "--filter=is-official", "--list-tags", "docker.io/library/alpine"}) + search = podmanTest.Podman([]string{"search", "--filter=is-official", "--list-tags", "quay.io/podman/stable"}) search.WaitWithDefaultTimeout() - Expect(search).To(ExitWithError()) + Expect(search).To(ExitWithError(125, "filters are not applicable to list tags result")) - search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/"}) + // With trailing slash + search = podmanTest.Podman([]string{"search", "--list-tags", "quay.io/podman/"}) search.WaitWithDefaultTimeout() + Expect(search).To(ExitWithError(125, `reference "podman/" must be a docker reference`)) + Expect(search.OutputToStringArray()).To(BeEmpty()) + + // No trailing slash + search = podmanTest.Podman([]string{"search", "--list-tags", "quay.io/podman"}) + search.WaitWithDefaultTimeout() + Expect(search).To(ExitWithError(125, "getting repository tags: fetching tags list: StatusCode: 404")) Expect(search.OutputToStringArray()).To(BeEmpty()) }) diff --git a/test/e2e/secret_test.go b/test/e2e/secret_test.go index ceffb527d6..71d5fc34bf 100644 --- a/test/e2e/secret_test.go +++ b/test/e2e/secret_test.go @@ -9,7 +9,6 @@ import ( "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman secret", func() { @@ -39,8 +38,7 @@ var _ = Describe("Podman secret", func() { session = podmanTest.Podman([]string{"secret", "create", "-d", "file", "--driver-opts", "opt1=val1", "a", secretFilePath}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(Equal("Error: a: secret name in use")) + Expect(session).Should(ExitWithError(125, "Error: a: secret name in use")) session = podmanTest.Podman([]string{"secret", "create", "-d", "file", "--driver-opts", "opt1=val1", "--replace", "a", secretFilePath}) session.WaitWithDefaultTimeout() @@ -49,8 +47,7 @@ var _ = Describe("Podman secret", func() { inspect = podmanTest.Podman([]string{"secret", "inspect", "-f", "{{.Spec.Driver.Options}}", secrID}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) - Expect(inspect.ErrorToString()).To(ContainSubstring(fmt.Sprintf("Error: inspecting secret: no secret with name or id %q: no such secret", secrID))) + Expect(inspect).To(ExitWithError(125, fmt.Sprintf("Error: inspecting secret: no secret with name or id %q: no such secret", secrID))) inspect = podmanTest.Podman([]string{"secret", "inspect", "-f", "{{.Spec.Driver.Options}}", "a"}) inspect.WaitWithDefaultTimeout() @@ -66,14 +63,12 @@ var _ = Describe("Podman secret", func() { badName := "foo/bar" session := podmanTest.Podman([]string{"secret", "create", badName, secretFilePath}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal(fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName))) + Expect(session).To(ExitWithError(125, fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName))) badName = "foo=bar" session = podmanTest.Podman([]string{"secret", "create", badName, secretFilePath}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal(fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName))) + Expect(session).To(ExitWithError(125, fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName))) }) It("podman secret inspect", func() { @@ -164,7 +159,7 @@ var _ = Describe("Podman secret", func() { inspect := podmanTest.Podman([]string{"secret", "inspect", "bogus"}) inspect.WaitWithDefaultTimeout() - Expect(inspect).To(ExitWithError()) + Expect(inspect).To(ExitWithError(125, `inspecting secret: no secret with name or id "bogus": no such secret`)) }) It("podman secret ls", func() { @@ -352,7 +347,7 @@ var _ = Describe("Podman secret", func() { // no env variable set, should fail session := podmanTest.Podman([]string{"secret", "create", "--env", "a", "MYENVVAR"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "cannot create store secret data: environment variable MYENVVAR is not set")) os.Setenv("MYENVVAR", "somedata") if IsRemote() { @@ -434,6 +429,6 @@ var _ = Describe("Podman secret", func() { exists := podmanTest.Podman([]string{"secret", "exists", secretName}) exists.WaitWithDefaultTimeout() - Expect(exists).Should(Exit(1)) + 
Expect(exists).Should(ExitWithError(1, "")) }) }) diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go index df9b123545..773de2ffb5 100644 --- a/test/e2e/start_test.go +++ b/test/e2e/start_test.go @@ -3,13 +3,13 @@ package integration import ( "fmt" "os" + "path/filepath" "strconv" "strings" . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman start", func() { @@ -17,7 +17,7 @@ var _ = Describe("Podman start", func() { It("podman start bogus container", func() { session := podmanTest.Podman([]string{"start", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "123" found: no such container`)) }) It("podman start single container by id", func() { @@ -36,10 +36,10 @@ var _ = Describe("Podman start", func() { Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"start", "test"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "not found in $PATH")) session = podmanTest.Podman([]string{"container", "exists", "test"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(1, "")) }) It("podman start --rm --attach removed on failure", func() { @@ -49,10 +49,10 @@ var _ = Describe("Podman start", func() { cid := session.OutputToString() session = podmanTest.Podman([]string{"start", "--attach", cid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "not found in $PATH")) session = podmanTest.Podman([]string{"container", "exists", cid}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(1, "")) }) It("podman container start single container by id", func() { @@ -97,7 +97,7 @@ var _ = Describe("Podman start", func() { session = podmanTest.Podman([]string{"start", "--attach", cid}) session.WaitWithDefaultTimeout() // It should forward the signal - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) It("podman start multiple containers", func() { @@ -118,7 +118,7 @@ var _ = Describe("Podman start", func() { cid1 := session.OutputToString() session = podmanTest.Podman([]string{"start", cid1, "doesnotexist"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "doesnotexist" found: no such container`)) }) It("podman multiple containers -- attach should fail", func() { @@ -130,7 +130,7 @@ var _ = Describe("Podman start", func() { Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"start", "-a", "foobar1", "foobar2"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "you cannot start and attach multiple containers at once")) }) It("podman failed to start with --rm should delete the container", func() { @@ -143,7 +143,7 @@ var _ = Describe("Podman start", func() { wait := podmanTest.Podman([]string{"wait", "test1"}) wait.WaitWithDefaultTimeout() - Expect(wait).To(ExitWithError()) + Expect(wait).To(ExitWithError(125, `no container with name or ID "test1" found: no such container`)) Eventually(podmanTest.NumberOfContainers, defaultWaitTimeout, 3.0).Should(BeZero()) }) @@ -155,24 +155,24 @@ var _ = Describe("Podman start", func() { start 
:= podmanTest.Podman([]string{"start", session.OutputToString()}) start.WaitWithDefaultTimeout() - Expect(start).To(ExitWithError()) + Expect(start).To(ExitWithError(125, "not found in $PATH")) Eventually(podmanTest.NumberOfContainers, defaultWaitTimeout, 3.0).Should(Equal(1)) }) It("podman start --sig-proxy should not work without --attach", func() { - session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session := podmanTest.Podman([]string{"create", "--name", "sigproxyneedsattach", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - session = podmanTest.Podman([]string{"start", "-l", "--sig-proxy"}) + session = podmanTest.Podman([]string{"start", "--sig-proxy", "sigproxyneedsattach"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "you cannot use sig-proxy without --attach: invalid argument")) }) It("podman start container with special pidfile", func() { SkipIfRemote("pidfile not handled by remote") - pidfile := tempdir + "pidfile" + pidfile := filepath.Join(tempdir, "pidfile") session := podmanTest.Podman([]string{"create", "--pidfile", pidfile, ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go index 5aa1d0d84c..3f0d4a8f16 100644 --- a/test/e2e/stats_test.go +++ b/test/e2e/stats_test.go @@ -8,7 +8,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) // TODO: we need to check the output. Currently, we only check the exit codes @@ -25,7 +24,7 @@ var _ = Describe("Podman stats", func() { It("podman stats with bogus container", func() { session := podmanTest.Podman([]string{"stats", "--no-stream", "123"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `unable to get list of containers: unable to look up container 123: no container with name or ID "123" found: no such container`)) }) It("podman stats on a running container", func() { @@ -81,7 +80,7 @@ var _ = Describe("Podman stats", func() { Expect(session).Should(ExitCleanly()) stats := podmanTest.Podman([]string{"stats", "-a", "--no-reset", "--no-stream", "--format", "\"table {{.ID}} {{.NoSuchField}} \""}) stats.WaitWithDefaultTimeout() - Expect(stats).To(ExitWithError()) + Expect(stats).To(ExitWithError(125, `template: stats:1:28: executing "stats" at <.NoSuchField>: can't evaluate field NoSuchField in type containers.containerStats`)) }) It("podman stats with negative interval", func() { @@ -90,7 +89,7 @@ var _ = Describe("Podman stats", func() { Expect(session).Should(ExitCleanly()) stats := podmanTest.Podman([]string{"stats", "-a", "--no-reset", "--no-stream", "--interval=-1"}) stats.WaitWithDefaultTimeout() - Expect(stats).To(ExitWithError()) + Expect(stats).To(ExitWithError(125, "invalid interval, must be a positive number greater zero")) }) It("podman stats with zero interval", func() { @@ -99,7 +98,7 @@ var _ = Describe("Podman stats", func() { Expect(session).Should(ExitCleanly()) stats := podmanTest.Podman([]string{"stats", "-a", "--no-reset", "--no-stream", "--interval=0"}) stats.WaitWithDefaultTimeout() - Expect(stats).To(ExitWithError()) + Expect(stats).To(ExitWithError(125, "invalid interval, must be a positive number greater zero")) }) It("podman stats with interval", func() { diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go index 
2feff4d34e..74b2b63d2c 100644 --- a/test/e2e/stop_test.go +++ b/test/e2e/stop_test.go @@ -2,6 +2,7 @@ package integration import ( "fmt" + "path/filepath" "strings" . "github.com/containers/podman/v5/test/utils" @@ -15,7 +16,7 @@ var _ = Describe("Podman stop", func() { It("podman stop bogus container", func() { session := podmanTest.Podman([]string{"stop", "foobar"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "foobar" found: no such container`)) }) It("podman stop --ignore bogus container", func() { @@ -258,10 +259,9 @@ var _ = Describe("Podman stop", func() { }) It("podman stop --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile := tmpDir + "cid" + cidFile := filepath.Join(tempdir, "cid") - session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, ALPINE, "top"}) + session := podmanTest.Podman([]string{"create", "--cidfile", cidFile, ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid := session.OutputToStringArray()[0] @@ -270,7 +270,7 @@ var _ = Describe("Podman stop", func() { session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - result := podmanTest.Podman([]string{"stop", "--cidfile", tmpFile}) + result := podmanTest.Podman([]string{"stop", "--cidfile", cidFile}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -278,23 +278,22 @@ var _ = Describe("Podman stop", func() { }) It("podman stop multiple --cidfile", func() { - tmpDir := GinkgoT().TempDir() - tmpFile1 := tmpDir + "cid-1" - tmpFile2 := tmpDir + "cid-2" + cidFile1 := filepath.Join(tempdir, "cid-1") + cidFile2 := filepath.Join(tempdir, "cid-2") - session := podmanTest.Podman([]string{"run", "--cidfile", tmpFile1, "-d", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "--cidfile", cidFile1, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid1 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(1)) - session = podmanTest.Podman([]string{"run", "--cidfile", tmpFile2, "-d", ALPINE, "top"}) + session = podmanTest.Podman([]string{"run", "--cidfile", cidFile2, "-d", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) cid2 := session.OutputToStringArray()[0] Expect(podmanTest.NumberOfContainers()).To(Equal(2)) - result := podmanTest.Podman([]string{"stop", "--cidfile", tmpFile1, "--cidfile", tmpFile2}) + result := podmanTest.Podman([]string{"stop", "--cidfile", cidFile1, "--cidfile", cidFile2}) result.WaitWithDefaultTimeout() Expect(result).Should(ExitCleanly()) output := result.OutputToString() @@ -308,19 +307,19 @@ var _ = Describe("Podman stop", func() { result := podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) result = podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot be used together")) result = podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--all", "--latest"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "--all, --latest, and --cidfile cannot 
be used together")) result = podmanTest.Podman([]string{"stop", "--latest", "--all"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "--all and --latest cannot be used together")) }) It("podman stop --all", func() { @@ -349,7 +348,7 @@ var _ = Describe("Podman stop", func() { session = podmanTest.Podman([]string{"stop", "bogus", cid}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "bogus" found: no such container`)) session = podmanTest.Podman([]string{"stop", "--ignore", "bogus", cid}) session.WaitWithDefaultTimeout() @@ -385,7 +384,7 @@ var _ = Describe("Podman stop", func() { session1 = podmanTest.Podman([]string{"stop", cid1, "-f", "status=running"}) session1.WaitWithDefaultTimeout() - Expect(session1).Should(Exit(125)) + Expect(session1).Should(ExitWithError(125, "--filter takes no arguments")) session1 = podmanTest.Podman([]string{"stop", "-a", "--filter", fmt.Sprintf("id=%swrongid", shortCid3)}) session1.WaitWithDefaultTimeout() diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go index 5fad53c510..45945f605b 100644 --- a/test/e2e/system_df_test.go +++ b/test/e2e/system_df_test.go @@ -95,8 +95,7 @@ var _ = Describe("podman system df", func() { It("podman system df --format with --verbose", func() { session := podmanTest.Podman([]string{"system", "df", "--format", "json", "--verbose"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(Equal("Error: cannot combine --format and --verbose flags")) + Expect(session).To(ExitWithError(125, "Error: cannot combine --format and --verbose flags")) }) It("podman system df --format json", func() { diff --git a/test/e2e/system_dial_stdio_test.go b/test/e2e/system_dial_stdio_test.go index 43cf47773e..35dfd5efee 100644 --- a/test/e2e/system_dial_stdio_test.go +++ b/test/e2e/system_dial_stdio_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("podman system dial-stdio", func() { @@ -22,7 +21,6 @@ var _ = Describe("podman system dial-stdio", func() { } session := podmanTest.Podman([]string{"system", "dial-stdio"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(ContainSubstring("Error: failed to open connection to podman")) + Expect(session).Should(ExitWithError(125, "Error: failed to open connection to podman")) }) }) diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go index f71b29819e..0e641e99e2 100644 --- a/test/e2e/system_reset_test.go +++ b/test/e2e/system_reset_test.go @@ -92,12 +92,16 @@ var _ = Describe("podman system reset", Serial, func() { ctrName := "testctr" port1 := GetPort() port2 := GetPort() - session := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "sleep", "inf"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + // Run system reset on a container that is still running. + // Set a 9-second timeout to verify that reset uses a stop timeout of + // zero, forcibly killing containers without waiting. 
+ // #21874 reset := podmanTest.Podman([]string{"system", "reset", "--force"}) - reset.WaitWithDefaultTimeout() + reset.WaitWithTimeout(9) Expect(reset).Should(ExitCleanly()) session2 := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"}) diff --git a/test/e2e/system_service_test.go b/test/e2e/system_service_test.go deleted file mode 100644 index 1a7e9da571..0000000000 --- a/test/e2e/system_service_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package integration - -import ( - "io" - "net" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/containers/podman/v5/utils" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" -) - -var _ = Describe("podman system service", func() { - - // The timeout used to for the service to respond. As shown in #12167, - // this may take some time on machines under high load. - var timeout = 30 - - Describe("verify timeout", func() { - It("of 2 seconds", func() { - SkipIfRemote("service subcommand not supported remotely") - - address := url.URL{ - Scheme: "tcp", - Host: net.JoinHostPort("localhost", randomPort()), - } - session := podmanTest.Podman([]string{ - "system", "service", "--time=2", address.String(), - }) - defer session.Kill() - - WaitForService(address) - Eventually(session, timeout).Should(Exit(0)) - }) - }) - - Describe("verify pprof endpoints", func() { - // Depends on pkg/api/server/server.go:255 - const magicComment = "pprof service listening on" - - It("are available", func() { - Skip("FIXME: Test is too flaky (#12624)") - SkipIfRemote("service subcommand not supported remotely") - - address := url.URL{ - Scheme: "tcp", - Host: net.JoinHostPort("localhost", randomPort()), - } - - pprofPort := randomPort() - session := podmanTest.Podman([]string{ - "system", "service", "--log-level=debug", "--time=0", - "--pprof-address=localhost:" + pprofPort, address.String(), - }) - defer session.Kill() - - WaitForService(address) - - // Combined with test below we have positive/negative test for pprof - Expect(session.Err.Contents()).Should(ContainSubstring(magicComment)) - - heap := url.URL{ - Scheme: "http", - Host: net.JoinHostPort("localhost", pprofPort), - Path: "/debug/pprof/heap", - RawQuery: "seconds=2", - } - resp, err := http.Get(heap.String()) - Expect(err).ShouldNot(HaveOccurred()) - defer resp.Body.Close() - Expect(resp).To(HaveHTTPStatus(http.StatusOK)) - - body, err := io.ReadAll(resp.Body) - Expect(err).ShouldNot(HaveOccurred()) - Expect(body).ShouldNot(BeEmpty()) - - session.Interrupt().Wait(time.Duration(timeout) * time.Second) - Eventually(session, timeout).Should(Exit(1)) - }) - - It("are not available", func() { - Skip("FIXME: Test is too flaky (#12624)") - SkipIfRemote("service subcommand not supported remotely") - - address := url.URL{ - Scheme: "tcp", - Host: net.JoinHostPort("localhost", randomPort()), - } - - session := podmanTest.Podman([]string{ - "system", "service", "--log-level=debug", "--time=0", address.String(), - }) - defer session.Kill() - - WaitForService(address) - - // Combined with test above we have positive/negative test for pprof - Expect(session.Err.Contents()).ShouldNot(ContainSubstring(magicComment)) - - session.Interrupt().Wait(time.Duration(timeout) * time.Second) - Eventually(session, timeout).Should(Exit(1)) - }) - }) -}) - -// randomPort leans on the go net library to find an available port... 
-func randomPort() string { - port, err := utils.GetRandomPort() - Expect(err).ShouldNot(HaveOccurred()) - return strconv.Itoa(port) -} diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go index ce599e7c3e..2b835b3059 100644 --- a/test/e2e/systemd_test.go +++ b/test/e2e/systemd_test.go @@ -66,7 +66,8 @@ WantedBy=default.target checkAvailableJournald() if !journald.journaldSkip { - logs := SystemExec("journalctl", []string{dashWhat, "-n", "20", "-u", serviceName}) + // "-q" needed on fc40+ because something creates /run/log/journal/XXX 2750 + logs := SystemExec("journalctl", []string{dashWhat, "-q", "-n", "20", "-u", serviceName}) Expect(logs).Should(ExitCleanly()) } diff --git a/test/e2e/testdata/sigstore-registries.d-fragment.yaml b/test/e2e/testdata/sigstore-registries.d-fragment.yaml index bc85a96181..ecac462039 100644 --- a/test/e2e/testdata/sigstore-registries.d-fragment.yaml +++ b/test/e2e/testdata/sigstore-registries.d-fragment.yaml @@ -1,5 +1,5 @@ docker: - localhost:5000/sigstore-signed: + localhost:5003/sigstore-signed: use-sigstore-attachments: true - localhost:5000/sigstore-signed-params: + localhost:5003/sigstore-signed-params: use-sigstore-attachments: true diff --git a/test/e2e/toolbox_test.go b/test/e2e/toolbox_test.go index ce2537a6cb..73b1b54ab8 100644 --- a/test/e2e/toolbox_test.go +++ b/test/e2e/toolbox_test.go @@ -38,7 +38,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Toolbox-specific testing", func() { @@ -63,6 +62,11 @@ var _ = Describe("Toolbox-specific testing", func() { if podmanTest.RemoteTest { Skip("Ulimit check does not work with a remote client") } + info := GetHostDistributionInfo() + if info.Distribution == "debian" { + // "expected 1048576 to be >= 1073741816" + Skip("FIXME 2024-05-28 fails on debian, maybe because of systemd 256?") + } var session *PodmanSessionIntegration var containerHardLimit int var rlimit syscall.Rlimit @@ -178,123 +182,18 @@ var _ = Describe("Toolbox-specific testing", func() { Expect(session.OutputToString()).To(ContainSubstring(expectedOutput)) }) - It("podman create --userns=keep-id - entrypoint - adding user with useradd and then removing their password", func() { - SkipIfNotRootless("only meaningful when run rootless") - var session *PodmanSessionIntegration + It("podman run --userns=keep-id - modify /etc/passwd and /etc/group", func() { + passwdLine := "testuser:x:1001:1001::/home/testuser:/bin/sh" + groupLine := "testuser:x:1001:" - var username = "testuser" - var homeDir = "/home/testuser" - var shell = "/bin/sh" - var uid = "1001" - var gid = "1001" - - useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s", - homeDir, shell, uid, username) - passwd := fmt.Sprintf("passwd --delete %s", username) - session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", - fmt.Sprintf("%s; %s; echo READY; sleep 1000", useradd, passwd)}) + // ensure that the container can edit passwd and group files + session := podmanTest.Podman([]string{"run", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", + "--user", "root:root", ALPINE, "sh", "-c", + fmt.Sprintf("echo %s > /etc/passwd && echo %s > /etc/group && cat /etc/passwd && cat /etc/group", passwdLine, groupLine)}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - - session = 
podmanTest.Podman([]string{"start", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - Expect(WaitContainerReady(podmanTest, "test", "READY", 5, 1)).To(BeTrue()) - - expectedOutput := fmt.Sprintf("%s:x:%s:%s::%s:%s", - username, uid, gid, homeDir, shell) - - session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring(expectedOutput)) - - expectedOutput = "passwd: Note: deleting a password also unlocks the password." - - session = podmanTest.Podman([]string{"logs", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(0)) - Expect(session.ErrorToString()).To(ContainSubstring(expectedOutput)) - }) - - It("podman create --userns=keep-id + podman exec - adding group with groupadd", func() { - SkipIfNotRootless("only meaningful when run rootless") - var session *PodmanSessionIntegration - - var groupName = "testgroup" - var gid = "1001" - - groupadd := fmt.Sprintf("groupadd --gid %s %s", gid, groupName) - - session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", - fmt.Sprintf("%s; echo READY; sleep 1000", groupadd)}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"start", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - Expect(WaitContainerReady(podmanTest, "test", "READY", 5, 1)).To(BeTrue()) - - session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/group"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring(groupName)) - - session = podmanTest.Podman([]string{"logs", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring("READY")) - }) - - It("podman create --userns=keep-id - entrypoint - modifying existing user with usermod - add to new group, change home/shell/uid", func() { - SkipIfNotRootless("only meaningful when run rootless") - var session *PodmanSessionIntegration - var badHomeDir = "/home/badtestuser" - var badShell = "/bin/sh" - var badUID = "1001" - var username = "testuser" - var homeDir = "/home/testuser" - var shell = "/bin/bash" - var uid = "1411" - var groupName = "testgroup" - var gid = "1422" - - // The use of bad* in the name of variables does not imply the invocation - // of useradd should fail The user is supposed to be created successfully - // but later his information (uid, home, shell,..) is changed via usermod. 
- useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s", - badHomeDir, badShell, badUID, username) - groupadd := fmt.Sprintf("groupadd --gid %s %s", - gid, groupName) - usermod := fmt.Sprintf("usermod --append --groups wheel --home %s --shell %s --uid %s --gid %s %s", - homeDir, shell, uid, gid, username) - - session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", - fmt.Sprintf("%s; %s; %s; echo READY; sleep 1000", useradd, groupadd, usermod)}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"start", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - Expect(WaitContainerReady(podmanTest, "test", "READY", 5, 1)).To(BeTrue()) - - expectedUser := fmt.Sprintf("%s:x:%s:%s::%s:%s", - username, uid, gid, homeDir, shell) - - session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring(expectedUser)) - - session = podmanTest.Podman([]string{"logs", "test"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring("READY")) + Expect(session.OutputToString()).Should(ContainSubstring(passwdLine)) + Expect(session.OutputToString()).Should(ContainSubstring(groupLine)) }) It("podman run --privileged --userns=keep-id --user root:root - entrypoint - (bind)mounting", func() { @@ -312,13 +211,12 @@ var _ = Describe("Toolbox-specific testing", func() { Expect(session).Should(ExitCleanly()) }) - It("podman create + start - with all needed switches for create - sleep as entry-point", func() { + It("podman create + start - with all needed switches for create", func() { SkipIfNotRootless("only meaningful when run rootless") - var session *PodmanSessionIntegration // These should be most of the switches that Toolbox uses to create a "toolbox" container // https://github.com/containers/toolbox/blob/main/src/cmd/create.go - session = podmanTest.Podman([]string{"create", + session := podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--dns", "none", "--hostname", "toolbox", @@ -333,20 +231,14 @@ var _ = Describe("Toolbox-specific testing", func() { "--ulimit", "host", "--userns=keep-id", "--user", "root:root", - fedoraToolbox, "sh", "-c", "echo READY; sleep 1000"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - - session = podmanTest.Podman([]string{"start", "test"}) + ALPINE, "sh", "-c", "echo READY"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(WaitContainerReady(podmanTest, "test", "READY", 5, 1)).To(BeTrue()) - - session = podmanTest.Podman([]string{"logs", "test"}) + session = podmanTest.Podman([]string{"start", "-a", "test"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring("READY")) + Expect(session.OutputToString()).Should(ContainSubstring("READY")) }) It("podman run --userns=keep-id check $HOME", func() { @@ -355,22 +247,20 @@ var _ = Describe("Toolbox-specific testing", func() { currentUser, err := user.Current() Expect(err).ToNot(HaveOccurred()) - session = podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:%s", currentUser.HomeDir, currentUser.HomeDir), "--userns=keep-id", fedoraToolbox, "sh", "-c", "echo 
$HOME"}) + session = podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:%s", currentUser.HomeDir, currentUser.HomeDir), "--userns=keep-id", ALPINE, "sh", "-c", "echo $HOME"}) session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) Expect(session.OutputToString()).To(ContainSubstring(currentUser.HomeDir)) - if isRootless() { - location := path.Dir(currentUser.HomeDir) - volumeArg := fmt.Sprintf("%s:%s", location, location) - session = podmanTest.Podman([]string{"run", - "--userns=keep-id", - "--volume", volumeArg, - fedoraToolbox, "sh", "-c", "echo $HOME"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring(currentUser.HomeDir)) - } + location := path.Dir(currentUser.HomeDir) + volumeArg := fmt.Sprintf("%s:%s", location, location) + session = podmanTest.Podman([]string{"run", + "--userns=keep-id", + "--volume", volumeArg, + ALPINE, "sh", "-c", "echo $HOME"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + Expect(session.OutputToString()).To(ContainSubstring(currentUser.HomeDir)) }) }) diff --git a/test/e2e/top_test.go b/test/e2e/top_test.go index 4726934c36..262b62111a 100644 --- a/test/e2e/top_test.go +++ b/test/e2e/top_test.go @@ -7,7 +7,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman top", func() { @@ -15,13 +14,17 @@ var _ = Describe("Podman top", func() { It("podman top without container name or id", func() { result := podmanTest.Podman([]string{"top"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "you must provide the name or id of a running container")) }) It("podman top on bogus container", func() { result := podmanTest.Podman([]string{"top", "1234"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + expect := `no container with name or ID "1234" found: no such container` + if !IsRemote() { + expect = `unable to look up requested container: ` + expect + } + Expect(result).Should(ExitWithError(125, expect)) }) It("podman top on non-running container", func() { @@ -29,7 +32,7 @@ var _ = Describe("Podman top", func() { Expect(ec).To(Equal(0)) result := podmanTest.Podman([]string{"top", cid}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, "top can only be used on running containers")) }) It("podman top on container", func() { @@ -115,8 +118,19 @@ var _ = Describe("Podman top", func() { // Because the image does not contain this must fail and we know we use the correct podman exec fallback. 
exec := podmanTest.Podman([]string{"top", session.OutputToString(), "aux"}) exec.WaitWithDefaultTimeout() - Expect(exec).Should(Exit(125)) - Expect(exec.ErrorToString()).Should(ContainSubstring("OCI runtime attempted to invoke a command that was not found")) + Expect(exec).Should(ExitWithError(125, "OCI runtime attempted to invoke a command that was not found")) + + session = podmanTest.Podman([]string{"run", "-d", "--uidmap=0:1000:1000", "--user", "9", fedoraMinimal, "sleep", "inf"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(ExitCleanly()) + + result = podmanTest.Podman([]string{"top", session.OutputToString(), "-ef", "hn"}) + result.WaitWithDefaultTimeout() + Expect(result).Should(ExitCleanly()) + output := result.OutputToString() + Expect(output).To(ContainSubstring("sleep inf")) + // check for https://github.com/containers/podman/issues/22293 + Expect(output).To(HavePrefix("9 "), "user id of process") }) It("podman top with comma-separated options", func() { @@ -142,7 +156,7 @@ var _ = Describe("Podman top", func() { // the wrong input and still print the -ef output instead. result := podmanTest.Podman([]string{"top", cid, "-eo", "invalid"}) result.WaitWithDefaultTimeout() - Expect(result).Should(Exit(125)) + Expect(result).Should(ExitWithError(125, `Error: ps(1) failed with exit code 1: error: unknown user-defined format specifier "invalid"`)) }) It("podman top on privileged container", func() { diff --git a/test/e2e/unshare_test.go b/test/e2e/unshare_test.go index 0b4c115158..9fc5c6d79a 100644 --- a/test/e2e/unshare_test.go +++ b/test/e2e/unshare_test.go @@ -6,17 +6,8 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) -// podman unshare --rootless-netns leaks the process by design. -// Running a container will cause the cleanup to kick in when this container gets stopped. 
-func cleanupRootlessSlirp4netns(p *PodmanTestIntegration) { - session := p.Podman([]string{"run", "--network", "bridge", ALPINE, "true"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) -} - var _ = Describe("Podman unshare", func() { BeforeEach(func() { if _, err := os.Stat("/proc/self/uid_map"); err != nil { @@ -37,53 +28,38 @@ var _ = Describe("Podman unshare", func() { Expect(session.OutputToString()).ToNot(ContainSubstring(userNS)) }) - It("podman unshare --rootless-netns", func() { - SkipIfRemote("podman-remote unshare is not supported") - defer cleanupRootlessSlirp4netns(podmanTest) - session := podmanTest.Podman([]string{"unshare", "--rootless-netns", "ip", "addr"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).To(ContainSubstring("tap0")) - }) - It("podman unshare exit codes", func() { SkipIfRemote("podman-remote unshare is not supported") session := podmanTest.Podman([]string{"unshare", "false"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) Expect(session.OutputToString()).Should(Equal("")) - Expect(session.ErrorToString()).Should(Equal("")) session = podmanTest.Podman([]string{"unshare", "/usr/bin/bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(127)) + Expect(session).Should(ExitWithError(127, "no such file or directory")) Expect(session.OutputToString()).Should(Equal("")) - Expect(session.ErrorToString()).Should(ContainSubstring("no such file or directory")) session = podmanTest.Podman([]string{"unshare", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(127)) + Expect(session).Should(ExitWithError(127, "executable file not found in $PATH")) Expect(session.OutputToString()).Should(Equal("")) - Expect(session.ErrorToString()).Should(ContainSubstring("executable file not found in $PATH")) session = podmanTest.Podman([]string{"unshare", "/usr"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(126)) + Expect(session).Should(ExitWithError(126, "permission denied")) Expect(session.OutputToString()).Should(Equal("")) - Expect(session.ErrorToString()).Should(ContainSubstring("permission denied")) session = podmanTest.Podman([]string{"unshare", "--bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, "unknown flag: --bogus")) Expect(session.OutputToString()).Should(Equal("")) - Expect(session.ErrorToString()).Should(ContainSubstring("unknown flag: --bogus")) }) It("podman unshare check remote error", func() { SkipIfNotRemote("check for podman-remote unshare error") session := podmanTest.Podman([]string{"unshare"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) - Expect(session.ErrorToString()).To(Equal(`Error: cannot use command "podman-remote unshare" with the remote podman client`)) + Expect(session).Should(ExitWithError(125, `Error: cannot use command "podman-remote unshare" with the remote podman client`)) }) }) diff --git a/test/e2e/untag_test.go b/test/e2e/untag_test.go index 628e398aee..0e7ba94aae 100644 --- a/test/e2e/untag_test.go +++ b/test/e2e/untag_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman untag", func() { @@ -35,7 +34,7 @@ var _ = Describe("Podman untag", func() { for _, t := range tags { session = podmanTest.Podman([]string{"image", "exists", t}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) } }) @@ -68,7 +67,7 @@ var _ = Describe("Podman untag", func() { session = podmanTest.Podman([]string{"image", "exists", tt.normalized}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) } }) diff --git a/test/e2e/update_test.go b/test/e2e/update_test.go index 2053d60dbd..27c091da17 100644 --- a/test/e2e/update_test.go +++ b/test/e2e/update_test.go @@ -3,6 +3,7 @@ package integration import ( "github.com/containers/common/pkg/cgroupv2" . "github.com/containers/podman/v5/test/utils" + "github.com/containers/storage/pkg/fileutils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -35,47 +36,25 @@ var _ = Describe("Podman update", func() { Expect(session).Should(ExitCleanly()) // checking cpu quota from --cpus - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("500000")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu/cpu.cfs_quota_us", "500000") // checking cpuset-cpus - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset/cpuset.cpus"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(Equal("0")) + podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset/cpuset.cpus", "0") // checking cpuset-mems - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset/cpuset.mems"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(Equal("0")) + podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset/cpuset.mems", "0") // checking memory limit - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory/memory.limit_in_bytes"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("1073741824")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory/memory.limit_in_bytes", "1073741824") // checking memory-swap - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("2147483648")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes", "2147483648") // checking cpu-shares - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.shares"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("123")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu/cpu.shares", "123") // checking pids-limit - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids/pids.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("123")) - + 
podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids/pids.max", "123") }) It("podman update container unspecified pid limit", func() { @@ -99,10 +78,7 @@ var _ = Describe("Podman update", func() { ctrID = session.OutputToString() // checking pids-limit was not changed after update when not specified as an option - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("max")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids.max", "max") }) It("podman update container all options v2", func() { @@ -138,58 +114,33 @@ var _ = Describe("Podman update", func() { ctrID = session.OutputToString() // checking cpu quota and period - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("500000")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu.max", "500000") - // checking blkio weight - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/io.bfq.weight"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("123")) + // checking blkio weight (as of 2024-05 this file does not exist on Debian 13) + if err := fileutils.Exists("/sys/fs/cgroup/system.slice/io.bfq.weight"); err == nil { + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/io.bfq.weight", "123") + } // checking device-read/write-bps/iops - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/io.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("rbps=10485760 wbps=10485760 riops=1000 wiops=1000")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/io.max", "rbps=10485760 wbps=10485760 riops=1000 wiops=1000") // checking cpuset-cpus - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset.cpus"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(Equal("0")) + podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset.cpus", "0") // checking cpuset-mems - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset.mems"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(Equal("0")) + podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset.mems", "0") // checking memory limit - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("1073741824")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory.max", "1073741824") // checking memory-swap - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory.swap.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("1073741824")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory.swap.max", "1073741824") // checking cpu-shares - session = 
podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.weight"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("5")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu.weight", "5") // checking pids-limit - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids.max"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("123")) + podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids.max", "123") }) It("podman update keep original resources if not overridden", func() { @@ -209,13 +160,85 @@ var _ = Describe("Podman update", func() { ctrID := session.OutputToString() + path := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" if v2, _ := cgroupv2.Enabled(); v2 { - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.max"}) - } else { - session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"}) + path = "/sys/fs/cgroup/cpu.max" } - session.WaitWithDefaultTimeout() - Expect(session).Should(ExitCleanly()) - Expect(session.OutputToString()).Should(ContainSubstring("500000")) + + podmanTest.CheckFileInContainerSubstring(ctrID, path, "500000") + }) + + It("podman update persists changes", func() { + SkipIfCgroupV1("testing flags that only work in cgroup v2") + SkipIfRootless("many of these handlers are not enabled while rootless in CI") + + memoryInspect := ".HostConfig.Memory" + memoryCgroup := "/sys/fs/cgroup/memory.max" + mem512m := "536870912" + mem256m := "268435456" + + testCtr := "test-ctr-name" + ctr1 := podmanTest.Podman([]string{"run", "-d", "--name", testCtr, "-m", "512m", ALPINE, "top"}) + ctr1.WaitWithDefaultTimeout() + Expect(ctr1).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem512m) + podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem512m) + + update := podmanTest.Podman([]string{"update", "-m", "256m", testCtr}) + update.WaitWithDefaultTimeout() + Expect(update).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem256m) + podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem256m) + + restart := podmanTest.Podman([]string{"restart", testCtr}) + restart.WaitWithDefaultTimeout() + Expect(restart).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem256m) + podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem256m) + + pause := podmanTest.Podman([]string{"pause", testCtr}) + pause.WaitWithDefaultTimeout() + Expect(pause).Should(ExitCleanly()) + + update2 := podmanTest.Podman([]string{"update", "-m", "512m", testCtr}) + update2.WaitWithDefaultTimeout() + Expect(update2).Should(ExitCleanly()) + + unpause := podmanTest.Podman([]string{"unpause", testCtr}) + unpause.WaitWithDefaultTimeout() + Expect(unpause).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem512m) + podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem512m) + }) + + It("podman update sets restart policy", func() { + restartPolicyName := ".HostConfig.RestartPolicy.Name" + restartPolicyRetries := ".HostConfig.RestartPolicy.MaximumRetryCount" + + testCtr := "test-ctr-name" + ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", testCtr, ALPINE, "top"}) + ctr1.WaitWithDefaultTimeout() + Expect(ctr1).Should(ExitCleanly()) + + 
podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "no") + podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "0") + + update1 := podmanTest.Podman([]string{"update", "--restart", "on-failure:5", testCtr}) + update1.WaitWithDefaultTimeout() + Expect(update1).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "on-failure") + podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "5") + + update2 := podmanTest.Podman([]string{"update", "--restart", "always", testCtr}) + update2.WaitWithDefaultTimeout() + Expect(update2).Should(ExitCleanly()) + + podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "always") + podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "0") }) }) diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go index 1b747b2d45..71b042a9d5 100644 --- a/test/e2e/volume_create_test.go +++ b/test/e2e/volume_create_test.go @@ -47,7 +47,7 @@ var _ = Describe("Podman volume create", func() { session = podmanTest.Podman([]string{"volume", "create", "myvol"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "volume with name myvol already exists: volume already exists")) }) It("podman create volume --ignore", func() { @@ -133,24 +133,21 @@ var _ = Describe("Podman volume create", func() { session := podmanTest.Podman([]string{"volume", "import", "notfound", "notfound.tar"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("open notfound.tar: no such file or directory")) + Expect(session).To(ExitWithError(125, "open notfound.tar: no such file or directory")) session = podmanTest.Podman([]string{"volume", "import", "notfound", "-"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("no such volume notfound")) + Expect(session).To(ExitWithError(125, "no such volume notfound")) session = podmanTest.Podman([]string{"volume", "export", "notfound"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) - Expect(session.ErrorToString()).To(ContainSubstring("no such volume notfound")) + Expect(session).To(ExitWithError(125, "no such volume notfound")) }) It("podman create volume with bad volume option", func() { session := podmanTest.Podman([]string{"volume", "create", "--opt", "badOpt=bad"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "invalid mount option badOpt for driver 'local': invalid argument")) }) It("podman create volume with o=uid,gid", func() { diff --git a/test/e2e/volume_exists_test.go b/test/e2e/volume_exists_test.go index 4f92a17363..d0709048db 100644 --- a/test/e2e/volume_exists_test.go +++ b/test/e2e/volume_exists_test.go @@ -5,7 +5,6 @@ import ( "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman volume exists", func() { @@ -26,6 +25,6 @@ var _ = Describe("Podman volume exists", func() { session = podmanTest.Podman([]string{"volume", "exists", stringid.GenerateRandomID()}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, "")) }) }) diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go index 22c517b35c..939e3a5e3c 100644 --- a/test/e2e/volume_plugin_test.go +++ b/test/e2e/volume_plugin_test.go @@ -27,13 +27,13 @@ var _ = Describe("Podman volume plugins", func() { It("volume create with nonexistent plugin errors", func() { session := podmanTest.Podman([]string{"volume", "create", "--driver", "notexist", "test_volume_name"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, "volume test_volume_name uses volume plugin notexist but it could not be retrieved: no volume plugin with name notexist available: required plugin missing")) }) It("volume create with not-running plugin does not error", func() { session := podmanTest.Podman([]string{"volume", "create", "--driver", "testvol0", "test_volume_name"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + Expect(session).To(ExitWithError(125, `Error: volume test_volume_name uses volume plugin testvol0 but it could not be retrieved: cannot access plugin testvol0 socket "/run/docker/plugins/testvol0.sock": stat /run/docker/plugins/testvol0.sock: no such file or directory`)) }) It("volume create and remove with running plugin succeeds", func() { @@ -145,7 +145,7 @@ var _ = Describe("Podman volume plugins", func() { // Remove should exit non-zero because missing plugin remove := podmanTest.Podman([]string{"volume", "rm", volName}) remove.WaitWithDefaultTimeout() - Expect(remove).To(ExitWithError()) + Expect(remove).To(ExitWithError(125, "cannot remove volume testVolume1 from plugin testvol3, but it has been removed from Podman: required plugin missing")) // But the volume should still be gone ls2 := podmanTest.Podman([]string{"volume", "ls", "-q"}) diff --git a/test/e2e/volume_rm_test.go b/test/e2e/volume_rm_test.go index 7b01fc90bf..59f611f131 100644 --- a/test/e2e/volume_rm_test.go +++ b/test/e2e/volume_rm_test.go @@ -1,10 +1,11 @@ package integration import ( + "fmt" + . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gexec" ) var _ = Describe("Podman volume rm", func() { @@ -30,14 +31,13 @@ var _ = Describe("Podman volume rm", func() { It("podman volume rm with --force flag", func() { session := podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"}) - cid := session.OutputToString() session.WaitWithDefaultTimeout() Expect(session).Should(ExitCleanly()) + cid := session.OutputToString() session = podmanTest.Podman([]string{"volume", "rm", "myvol"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(2)) - Expect(session.ErrorToString()).To(ContainSubstring(cid)) + Expect(session).Should(ExitWithError(2, fmt.Sprintf("volume myvol is being used by the following container(s): %s: volume is being used", cid))) session = podmanTest.Podman([]string{"volume", "rm", "-t", "0", "-f", "myvol"}) session.WaitWithDefaultTimeout() @@ -52,7 +52,7 @@ var _ = Describe("Podman volume rm", func() { It("podman volume remove bogus", func() { session := podmanTest.Podman([]string{"volume", "rm", "bogus"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(1)) + Expect(session).Should(ExitWithError(1, `no volume with name "bogus" found: no such volume`)) }) It("podman rm with --all flag", func() { @@ -100,7 +100,12 @@ var _ = Describe("Podman volume rm", func() { session = podmanTest.Podman([]string{"volume", "rm", "myv"}) session.WaitWithDefaultTimeout() - Expect(session).To(ExitWithError()) + expect := "more than one result for volume name myv: volume already exists" + if podmanTest.DatabaseBackend == "boltdb" { + // boltdb issues volume name in quotes + expect = `more than one result for volume name "myv": volume already exists` + } + Expect(session).To(ExitWithError(125, expect)) session = podmanTest.Podman([]string{"volume", "ls"}) session.WaitWithDefaultTimeout() diff --git a/test/e2e/wait_test.go b/test/e2e/wait_test.go index 7d84a88d14..205c4d33dd 100644 --- a/test/e2e/wait_test.go +++ b/test/e2e/wait_test.go @@ -4,7 +4,6 @@ import ( . "github.com/containers/podman/v5/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman wait", func() { @@ -12,7 +11,7 @@ var _ = Describe("Podman wait", func() { It("podman wait on bogus container", func() { session := podmanTest.Podman([]string{"wait", "1234"}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `no container with name or ID "1234" found: no such container`)) }) @@ -86,7 +85,7 @@ var _ = Describe("Podman wait", func() { Expect(session).Should(ExitCleanly()) session = podmanTest.Podman([]string{"container", "wait", "--interval", "100days", session.OutputToString()}) session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(125)) + Expect(session).Should(ExitWithError(125, `time: unknown unit "days" in duration "100days"`)) }) It("podman wait on three containers", func() { diff --git a/test/farm/001-farm.bats b/test/farm/001-farm.bats index e9d987b9bb..ad918998d3 100644 --- a/test/farm/001-farm.bats +++ b/test/farm/001-farm.bats @@ -116,6 +116,13 @@ EOF @test "farm - build on farm node only (podman-remote)" { iname="test-image-5" + # ManifestAdd only + echo "Running test with ManifestAdd only..." + run_podman --remote farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR + assert "$output" =~ "Farm \"$FARMNAME\" ready" + + # ManifestListClear and ManifestAdd + echo "Running test with ManifestListClear and ManifestAdd..." 
run_podman --remote farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR assert "$output" =~ "Farm \"$FARMNAME\" ready" diff --git a/test/minikube/001-kube.bats b/test/minikube/001-kube.bats index 04137ddbad..2f3ffeec8b 100755 --- a/test/minikube/001-kube.bats +++ b/test/minikube/001-kube.bats @@ -9,11 +9,9 @@ load helpers.bash # BEGIN tests @test "minikube - check cluster is up" { - run minikube kubectl get nodes - assert "$status" -eq 0 "get status of nodes" + run_minikube kubectl get nodes assert "$output" =~ "Ready" - run minikube kubectl get pods - assert "$status" -eq 0 "get pods in the default namespace" + run_minikube kubectl get pods assert "$output" == "No resources found in default namespace." } @@ -25,14 +23,11 @@ load helpers.bash # deploy to the minikube cluster project="ctr-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "pod/$cname-pod created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated pod yaml to minikube" { @@ -48,14 +43,11 @@ load helpers.bash # deploy to the minikube cluster project="pod-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "pod/$pname created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - apply podman ctr to cluster" { @@ -64,35 +56,29 @@ load helpers.bash # deploy to minikube cluster with kube apply project="ctr-apply" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" + run_minikube kubectl create namespace $project run_podman kube apply --kubeconfig $KUBECONFIG --ns $project $cname assert "$output" =~ "Successfully deployed workloads to cluster!" - run minikube kubectl -- get pods --namespace $project - assert "$status" -eq 0 "kube apply $cname to the cluster" + run_minikube kubectl -- get pods --namespace $project assert "$output" =~ "$cname-pod" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - apply podman pod to cluster" { pname="test-pod-apply" run_podman pod create --name $pname - run podman container create --pod $pname $IMAGE top + run_podman container create --pod $pname $IMAGE top # deploy to minikube cluster with kube apply project="pod-apply" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" + run_minikube kubectl create namespace $project run_podman kube apply --kubeconfig $KUBECONFIG --ns $project $pname assert "$output" =~ "Successfully deployed workloads to cluster!" 
- run minikube kubectl -- get pods --namespace $project - assert "$status" -eq 0 "kube apply $pname to the cluster" + run_minikube kubectl -- get pods --namespace $project assert "$output" =~ "$pname" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated kube yaml with podman kube apply to cluster" { @@ -108,16 +94,13 @@ load helpers.bash # deploy to minikube cluster with kube apply project="yaml-apply" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" + run_minikube kubectl create namespace $project run_podman kube apply --kubeconfig $KUBECONFIG --ns $project -f $fname assert "$output" =~ "Successfully deployed workloads to cluster!" - run minikube kubectl -- get pods --namespace $project - assert "$status" -eq 0 "kube apply $pname to the cluster" + run_minikube kubectl -- get pods --namespace $project assert "$output" =~ "$pname" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - apply podman ctr with volume to cluster" { @@ -127,19 +110,15 @@ load helpers.bash # deploy to minikube cluster with kube apply project="ctr-vol-apply" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" + run_minikube kubectl create namespace $project run_podman kube apply --kubeconfig $KUBECONFIG --ns $project $cname $vname assert "$output" =~ "Successfully deployed workloads to cluster!" - run minikube kubectl -- get pods --namespace $project - assert "$status" -eq 0 "kube apply $cname to the cluster" + run_minikube kubectl -- get pods --namespace $project assert "$output" =~ "$cname-pod" - run minikube kubectl -- get pvc --namespace $project - assert "$status" -eq 0 "kube apply $vname to the cluster" + run_minikube kubectl -- get pvc --namespace $project assert "$output" =~ "$vname" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - apply podman ctr with service to cluster" { @@ -148,19 +127,15 @@ load helpers.bash # deploy to minikube cluster with kube apply project="ctr-svc-apply" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" + run_minikube kubectl create namespace $project run_podman kube apply --kubeconfig $KUBECONFIG -s --ns $project $cname assert "$output" =~ "Successfully deployed workloads to cluster!" 
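A quick aside on the pattern used throughout these minikube tests: the old `run minikube ...` plus separate `assert "$status" -eq 0` pairs are collapsed into single `run_minikube` calls, and that wrapper (added to test/minikube/helpers.bash later in this diff) asserts a zero exit status itself and dumps `minikube logs` when the expectation fails. A minimal sketch of the calling convention, assuming that helper; the kubectl subcommands shown are placeholders:

```bash
# Default: expect exit code 0; on mismatch the helper prints `minikube logs` and dies.
run_minikube kubectl get pods

# A leading number (0-299) is treated as the expected exit code.
run_minikube 1 kubectl get namespace no-such-namespace

# '?' ignores the exit code entirely; only $output is of interest.
run_minikube '?' kubectl describe pod some-pod
```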
- run minikube kubectl -- get pods --namespace $project - assert "$status" -eq 0 "kube apply $cname to the cluster" + run_minikube kubectl -- get pods --namespace $project assert "$output" =~ "$cname-pod" - run minikube kubectl -- get svc --namespace $project - assert "$status" -eq 0 "kube apply service to the cluster" + run_minikube kubectl -- get svc --namespace $project assert "$output" =~ "$cname-pod" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated container yaml to minikube --type=deployment" { @@ -171,14 +146,11 @@ load helpers.bash # deploy to the minikube cluster project="dep-ctr-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "deployment.apps/$cname-pod-deployment created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated pod yaml to minikube --type=deployment" { @@ -194,14 +166,11 @@ load helpers.bash # deploy to the minikube cluster project="dep-pod-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "deployment.apps/$pname-deployment created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated container yaml to minikube --type=daemonset" { @@ -212,14 +181,11 @@ load helpers.bash # deploy to the minikube cluster project="dep-ctr-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "daemonset.apps/$cname-pod-daemonset created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } @test "minikube - deploy generated pod yaml to minikube --type=daemonset" { @@ -235,12 +201,9 @@ load helpers.bash # deploy to the minikube cluster project="dep-pod-ns" - run minikube kubectl create namespace $project - assert "$status" -eq 0 "create new namespace $project" - run minikube kubectl -- apply -f $fname - assert "$status" -eq 0 "deploy $fname to the cluster" + run_minikube kubectl create namespace $project + run_minikube kubectl -- apply -f $fname assert "$output" == "daemonset.apps/$pname-daemonset created" wait_for_pods_to_start - run minikube kubectl delete namespace $project - assert $status -eq 0 "delete namespace $project" + run_minikube kubectl delete namespace $project } diff --git a/test/minikube/helpers.bash b/test/minikube/helpers.bash index c47b711e3d..24e394647c 100644 --- a/test/minikube/helpers.bash +++ 
b/test/minikube/helpers.bash @@ -4,10 +4,55 @@ load ../system/helpers.bash KUBECONFIG="$HOME/.kube/config" +################## +# run_minikube # Local helper, with instrumentation for debugging failures +################## +function run_minikube() { + # Number as first argument = expected exit code; default 0 + local expected_rc=0 + case "$1" in + [0-9]) expected_rc=$1; shift;; + [1-9][0-9]) expected_rc=$1; shift;; + [12][0-9][0-9]) expected_rc=$1; shift;; + '?') expected_rc= ; shift;; # ignore exit code + esac + + # stdout is only emitted upon error; this printf is to help in debugging + printf "\n%s %s %s %s\n" "$(timestamp)" "\$" "minikube" "$*" + run minikube "$@" + # without "quotes", multiple lines are glommed together into one + if [[ -n "$output" ]]; then + echo "$(timestamp) $output" + fi + if [[ "$status" -ne 0 ]]; then + echo -n "$(timestamp) [ rc=$status "; + if [[ -n "$expected_rc" ]]; then + if [[ "$status" -eq "$expected_rc" ]]; then + echo -n "(expected) "; + else + echo -n "(** EXPECTED $expected_rc **) "; + fi + fi + echo "]" + fi + + if [[ -n "$expected_rc" ]]; then + if [[ "$status" -ne "$expected_rc" ]]; then + # Further debugging + echo "\$ minikube logs" + run minikube logs + echo "$output" + + die "exit code is $status; expected $expected_rc" + fi + fi +} + + function setup(){ # only set up the minikube cluster before the first test if [[ "$BATS_TEST_NUMBER" -eq 1 ]]; then - minikube start + run_minikube start wait_for_default_sa fi basic_setup @@ -17,8 +62,12 @@ function teardown(){ # only delete the minikube cluster if we are done with the last test # the $DEBUG_MINIKUBE env can be set to preserve the cluster to debug if needed if [[ "$BATS_TEST_NUMBER" -eq ${#BATS_TEST_NAMES[@]} ]] && [[ "$DEBUG_MINIKUBE" == "" ]]; then - minikube delete + run_minikube delete fi + + # Prevents nasty red warnings in log + run_podman rmi --ignore $(pause_image) + basic_teardown } @@ -29,8 +78,7 @@ function wait_for_default_sa(){ # if the default service account hasn't been created yet, there is something else wrong while [[ $count -lt 30 ]] && [[ $sa_ready == false ]] do - run minikube kubectl get sa - assert "$status" -eq 0 + run_minikube kubectl get sa if [[ "$output" != "No resources found in default namespace." ]]; then sa_ready=true fi @@ -49,7 +97,7 @@ function wait_for_pods_to_start(){ # if the pod hasn't started running after 30 seconds, there is something else wrong while [[ $count -lt 30 ]] && [[ $running == false ]] do - run minikube kubectl get pods + run_minikube kubectl get pods assert "$status" -eq 0 if [[ "$output" =~ "Running" ]]; then running=true diff --git a/test/python/docker/compat/constant.py b/test/python/docker/compat/constant.py index 2a0046daf6..f6b9c9a35d 100644 --- a/test/python/docker/compat/constant.py +++ b/test/python/docker/compat/constant.py @@ -6,3 +6,4 @@ ALPINE_TARBALL = "alpine.tar" BB = "quay.io/libpod/busybox:latest" NGINX = "quay.io/libpod/alpine_nginx:latest" +DOCKER_API_COMPATIBILITY_VERSION = "1.44" diff --git a/test/python/docker/compat/test_containers.py b/test/python/docker/compat/test_containers.py index 0595b29123..cb2c5f340b 100644 --- a/test/python/docker/compat/test_containers.py +++ b/test/python/docker/compat/test_containers.py @@ -2,19 +2,25 @@ Integration tests for exercising docker-py against Podman Service. 
""" import io +import json import tarfile import threading import time from typing import IO, List, Optional +import yaml from docker import errors from docker.models.containers import Container from docker.models.images import Image from docker.models.volumes import Volume from docker.types import Mount +from jsonschema.exceptions import best_match, ValidationError # pylint: disable=no-name-in-module,import-error,wrong-import-order from test.python.docker.compat import common, constant +from openapi_schema_validator import OAS31Validator + +from test.python.docker.compat.constant import DOCKER_API_COMPATIBILITY_VERSION # pylint: disable=missing-function-docstring @@ -302,3 +308,38 @@ def wait_and_start(): finally: ctr.stop() ctr.remove(force=True) + + def test_container_inspect_compatibility(self): + """Test container inspect result compatibility with DOCKER_API. + When upgrading module "github.com/docker/docker" this test might fail, if so please correct podman inspect + command result to stay compatible with docker. + """ + ctr = self.docker.containers.create(image="alpine", detach=True) + try: + spec = yaml.load(open("vendor/github.com/docker/docker/api/swagger.yaml").read(), Loader=yaml.Loader) + ctr_inspect = json.loads(self.podman.run("inspect", ctr.id).stdout)[0] + schema = spec['paths']["/containers/{id}/json"]["get"]['responses'][200]['schema'] + schema["definitions"] = spec["definitions"] + + OAS31Validator.check_schema(schema) + validator = OAS31Validator(schema) + important_error = [] + for error in validator.iter_errors(ctr_inspect): + if isinstance(error, ValidationError): + # ignore None instead of object/array/string errors + if error.message.startswith("None is not of type"): + continue + # ignore Windows specific option error + if error.json_path == '$.HostConfig.Isolation': + continue + important_error.append(error) + if important_error: + if newversion := spec["info"]["version"] != DOCKER_API_COMPATIBILITY_VERSION: + ex = Exception(f"There may be a breaking change in Docker API between " + f"{DOCKER_API_COMPATIBILITY_VERSION} and {newversion}") + raise best_match(important_error) from ex + else: + raise best_match(important_error) + finally: + ctr.stop() + ctr.remove(force=True) diff --git a/test/python/requirements.txt b/test/python/requirements.txt index 5a19ac00b8..929770adcb 100644 --- a/test/python/requirements.txt +++ b/test/python/requirements.txt @@ -1,6 +1,8 @@ docker~=6.1.0 -requests-mock~=1.11.0 -requests~=2.31.0 -setuptools~=69.1.0 -python-dateutil~=2.8.1 +requests-mock~=1.12.1 +requests~=2.32.3 +setuptools~=70.1.0 +python-dateutil~=2.9.0 PyYAML~=6.0.0 +openapi-schema-validator~=0.6.2 +pytest==8.1.2 diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats index 7b4cc58b3e..7c10296290 100644 --- a/test/system/001-basic.bats +++ b/test/system/001-basic.bats @@ -195,10 +195,10 @@ See 'podman version --help'" "podman version --remote" } @test "podman --log-level recognizes log levels" { - run_podman 1 --log-level=telepathic info + run_podman 1 --log-level=telepathic version is "$output" 'Log Level "telepathic" is not supported.*' - run_podman --log-level=trace info + run_podman --log-level=trace version if ! 
is_remote; then # podman-remote does not do any trace logging assert "$output" =~ " level=trace " "log-level=trace" @@ -207,33 +207,33 @@ See 'podman version --help'" "podman version --remote" assert "$output" =~ " level=info " "log-level=trace includes info" assert "$output" !~ " level=warn" "log-level=trace does not show warn" - run_podman --log-level=debug info + run_podman --log-level=debug version assert "$output" !~ " level=trace " "log-level=debug does not show trace" assert "$output" =~ " level=debug " "log-level=debug" assert "$output" =~ " level=info " "log-level=debug includes info" assert "$output" !~ " level=warn" "log-level=debug does not show warn" - run_podman --log-level=info info + run_podman --log-level=info version assert "$output" !~ " level=trace " "log-level=info does not show trace" assert "$output" !~ " level=debug " "log-level=info does not show debug" assert "$output" =~ " level=info " "log-level=info" - run_podman --log-level=warn info + run_podman --log-level=warn version assert "$output" !~ " level=" "log-level=warn shows no logs at all" - run_podman --log-level=warning info + run_podman --log-level=warning version assert "$output" !~ " level=" "log-level=warning shows no logs at all" - run_podman --log-level=error info + run_podman --log-level=error version assert "$output" !~ " level=" "log-level=error shows no logs at all" # docker compat - run_podman --debug info + run_podman --debug version assert "$output" =~ " level=debug " "podman --debug gives debug output" - run_podman -D info + run_podman -D version assert "$output" =~ " level=debug " "podman -D gives debug output" - run_podman 1 --debug --log-level=panic info + run_podman 1 --debug --log-level=panic version is "$output" "Setting --log-level and --debug is not allowed" } diff --git a/test/system/005-info.bats b/test/system/005-info.bats index 020b351f95..2edbb26c4a 100644 --- a/test/system/005-info.bats +++ b/test/system/005-info.bats @@ -47,6 +47,7 @@ host.cgroupVersion | v[12] host.networkBackendInfo | .*dns.*package.* host.ociRuntime.path | $expr_path host.pasta | .*executable.*package.* +host.rootlessNetworkCmd | pasta store.configFile | $expr_path store.graphDriverName | [a-z0-9]\\\+\\\$ store.graphRoot | $expr_path diff --git a/test/system/010-images.bats b/test/system/010-images.bats index 977c58684d..074359c0d1 100644 --- a/test/system/010-images.bats +++ b/test/system/010-images.bats @@ -312,6 +312,15 @@ Deleted: $pauseID" is "$output" "Error: bogus: image not known" "Should print error" run_podman image rm --force bogus is "$output" "" "Should print no output" + + random_image_name=$(random_string) + random_image_name=${random_image_name,,} # name must be lowercase + run_podman image tag $IMAGE $random_image_name + run_podman image rm --force bogus $random_image_name + assert "$output" = "Untagged: localhost/$random_image_name:latest" "removed image" + + run_podman images + assert "$output" !~ "$random_image_name" "image must be removed" } @test "podman images - commit docker with comment" { diff --git a/test/system/012-manifest.bats b/test/system/012-manifest.bats index 1e672a9490..8e1a507bcb 100644 --- a/test/system/012-manifest.bats +++ b/test/system/012-manifest.bats @@ -131,7 +131,7 @@ EOF # Push to local registry; the magic key here is --add-compression... 
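For the manifest push above, `--compression-format gzip --add-compression zstd` should leave each architecture represented by both a gzip-compressed and a zstd-compressed variant, which the test then verifies with skopeo. A hedged sketch of checking that by hand with skopeo and jq, outside the test; the registry address is a placeholder for the local test registry:

```bash
registry=localhost:5000          # placeholder for the test registry port
repo=$registry/test

# Top-level list: one entry per architecture/compression combination.
skopeo inspect --tls-verify=false --raw docker://$repo:1.0 | jq -r '.manifests[].digest' |
while read -r digest; do
    echo "== $digest"
    # Per-image manifest: look for both gzip- and zstd-compressed layer media types.
    skopeo inspect --tls-verify=false --raw docker://$repo@$digest | jq -r '.layers[].mediaType'
done
```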
local manifestpushed="localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test:1.0" - run_podman manifest push --authfile=$authfile --all --add-compression zstd --tls-verify=false $manifestlocal $manifestpushed + run_podman manifest push --authfile=$authfile --all --compression-format gzip --add-compression zstd --tls-verify=false $manifestlocal $manifestpushed # ...and use skopeo to confirm that each component has the right settings echo "$_LOG_PROMPT skopeo inspect ... $manifestpushed" @@ -150,4 +150,134 @@ EOF run_podman image prune -f } +function manifestListAddArtifactOnce() { + echo listFlags="$listFlags" + echo platformFlags="$platformFlags" + echo typeFlag="$typeFlag" + echo layerTypeFlag="$layerTypeFlag" + echo configTypeFlag="$configTypeFlag" + echo configFlag="$configFlag" + echo titleFlag="$titleFlag" + local index artifact firstdigest seconddigest config configSize defaulttype filetitle requested expected actual + run_podman manifest create $listFlags $list + run_podman manifest add $list ${platformFlags} --artifact ${typeFlag} ${layerTypeFlag} ${configTypeFlag} ${configFlag} ${titleFlag} ${PODMAN_TMPDIR}/listed.txt + run_podman manifest add $list ${platformFlags} --artifact ${typeFlag} ${layerTypeFlag} ${configTypeFlag} ${configFlag} ${titleFlag} ${PODMAN_TMPDIR}/zeroes + run_podman manifest inspect $list + run_podman tag $list localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test + run_podman manifest push --tls-verify=false localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test + run skopeo inspect --tls-verify=false --raw docker://localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test + assert $status -eq 0 + echo "$output" + index="$output" + if [[ -n "$listFlags" ]] ; then + assert $(jq -r '.annotations["global"]' <<<"$index") == local + fi + if [[ -n "$platformFlags" ]] ; then + assert $(jq -r '.manifests[1].platform.os' <<<"$index") == linux + assert $(jq -r '.manifests[1].platform.architecture' <<<"$index") == amd64 + fi + if [[ -n "$typeFlag" ]] ; then + actual=$(jq -r '.manifests[0].artifactType' <<<"$index") + assert "${actual#null}" == "${typeFlag#--artifact-type=}" + actual=$(jq -r '.manifests[1].artifactType' <<<"$index") + assert "${actual#null}" == "${typeFlag#--artifact-type=}" + fi + firstdigest=$(jq -r '.manifests[0].digest' <<<"$index") + seconddigest=$(jq -r '.manifests[1].digest' <<<"$index") + for digest in $firstdigest $seconddigest ; do + case $digest in + $firstdigest) + filetitle=listed.txt + defaulttype=text/plain + ;; + $seconddigest) + filetitle=zeroes + defaulttype=application/octet-stream + ;; + *) + false + ;; + esac + run skopeo inspect --raw --tls-verify=false docker://localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test@${digest} + assert $status -eq 0 + echo "$output" + artifact="$output" + if [[ -n "$typeFlag" ]] ; then + actual=$(jq -r '.artifactType' <<<"$artifact") + assert "${actual#null}" == "${typeFlag#--artifact-type=}" + else + actual=$(jq -r '.artifactType' <<<"$artifact") + assert "${actual}" == application/vnd.unknown.artifact.v1 + fi + if [ -n "$layerTypeFlag" ] ; then + actual=$(jq -r '.layers[0].mediaType' <<<"$artifact") + assert "${actual}" == "${layerTypeFlag#--artifact-layer-type=}" + else + actual=$(jq -r '.layers[0].mediaType' <<<"$artifact") + assert "${actual}" == "$defaulttype" + fi + requested=${configTypeFlag#--artifact-config-type=} + actual=$(jq -r '.config.mediaType' <<<"$artifact") + if test -n "$requested" ; then + assert "$actual" == "$requested" + else + config=${configFlag#--artifact-config=} + if [ -z "$config" ] ; then + 
expected=application/vnd.oci.empty.v1+json + else + configSize=$(wc -c <"$config") + if [ $configSize -gt 0 ] ; then + expected=application/vnd.oci.image.config.v1+json + else + expected=application/vnd.oci.empty.v1+json + fi + fi + assert "$actual" == "$expected" + fi + if test -n "$titleFlag" ; then + assert $(jq -r '.layers[0].annotations["org.opencontainers.image.title"]' <<<"$artifact") == null + else + assert $(jq -r '.layers[0].annotations["org.opencontainers.image.title"]' <<<"$artifact") == $filetitle + fi + done + run_podman rmi $list localhost:${PODMAN_LOGIN_REGISTRY_PORT}/test +} + +@test "manifest list --add --artifact" { + # Build a list and add some files to it, making sure to exercise and verify + # every flag available. + skip_if_remote "running a local registry doesn't work with podman-remote" + start_registry + run_podman login --tls-verify=false \ + --username ${PODMAN_LOGIN_USER} \ + --password-stdin \ + --authfile=$authfile \ + localhost:${PODMAN_LOGIN_REGISTRY_PORT} <<<"${PODMAN_LOGIN_PASS}" + local list="test:1.0" + truncate -s 20M ${PODMAN_TMPDIR}/zeroes + echo oh yeah > ${PODMAN_TMPDIR}/listed.txt + echo '{}' > ${PODMAN_TMPDIR}/minimum-config.json + local listFlags platformFlags typeFlag configTypeFlag configFlag layerTypeFlag titleFlag + for listFlags in "" "--annotation global=local" ; do + manifestListAddArtifactOnce + done + for platformFlags in "" "--os=linux --arch=amd64" ; do + manifestListAddArtifactOnce + done + for typeFlag in "" --artifact-type="" --artifact-type=application/octet-stream --artifact-type=text/plain ; do + manifestListAddArtifactOnce + done + for configTypeFlag in "" --artifact-config-type=application/octet-stream --artifact-config-type=text/plain ; do + for configFlag in "" --artifact-config= --artifact-config=${PODMAN_TMPDIR}/minimum-config.json ; do + manifestListAddArtifactOnce + done + done + for layerTypeFlag in "" --artifact-layer-type=application/octet-stream --artifact-layer-type=text/plain ; do + manifestListAddArtifactOnce + done + for titleFlag in "" "--artifact-exclude-titles" ; do + manifestListAddArtifactOnce + done + stop_registry +} # vim: filetype=sh diff --git a/test/system/030-run.bats b/test/system/030-run.bats index a5f4e46a19..5f91adb48d 100644 --- a/test/system/030-run.bats +++ b/test/system/030-run.bats @@ -37,10 +37,7 @@ echo $rand | 0 | $rand # a way to do so. 
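Stepping back to the `manifest list --add --artifact` system test introduced above: a hypothetical end-to-end use of the flags it exercises, mirroring the helper's two single-file `manifest add` calls; file names and the registry are placeholders, and `--tls-verify=false` is only appropriate for a throwaway local registry:

```bash
# Build a manifest list and attach two local files as OCI artifact entries.
podman manifest create test:1.0
podman manifest add test:1.0 --artifact --artifact-type text/plain \
    --os linux --arch amd64 ./listed.txt
podman manifest add test:1.0 --artifact \
    --artifact-type application/octet-stream ./zeroes

# Inspect locally, then push the whole list (artifacts included).
podman manifest inspect test:1.0
podman manifest push --tls-verify=false test:1.0 localhost:5000/test
```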
eval set "$cmd" - # FIXME: The $expected diff -u ${expected} ${logfile} + + run_podman rm -f -t0 logtest } @test "podman logs restarted - k8s-file" { @@ -171,6 +167,7 @@ function _log_test_restarted() { run_podman --events-backend=file logs test run_podman 125 --events-backend=file logs --follow test is "$output" "Error: using --follow with the journald --log-driver but without the journald --events-backend (file) is not supported" "journald logger requires journald eventer" + run_podman rm test } function _log_test_since() { diff --git a/test/system/037-runlabel.bats b/test/system/037-runlabel.bats index 3056d6aca7..b59b8c1f88 100644 --- a/test/system/037-runlabel.bats +++ b/test/system/037-runlabel.bats @@ -12,16 +12,16 @@ load helpers rand3=$(random_string 30) cat >$containerfile < container" + run_podman rm -f $cid } @test "podman start --all --filter" { @@ -84,6 +90,8 @@ load helpers run_podman wait $cid_exited_0 $cid_exited_1 run_podman start --all --filter exited=0 is "$output" "$cid_exited_0" + + run_podman rm -f $cid_exited_0 $cid_exited_1 } @test "podman start print IDs or raw input" { diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats index 9d2f5874fb..a1e8b55f8f 100644 --- a/test/system/050-stop.bats +++ b/test/system/050-stop.bats @@ -39,9 +39,13 @@ load helpers @test "podman stop --all" { # Start three containers, create (without running) a fourth run_podman run -d --name c1 $IMAGE sleep 20 + cid1="$output" run_podman run -d --name c2 $IMAGE sleep 40 + cid2="$output" run_podman run -d --name c3 $IMAGE sleep 60 + cid3="$output" run_podman create --name c4 $IMAGE sleep 80 + cid4="$output" # podman ps (without -a) should show the three running containers run_podman ps --sort names --format '{{.Names}}--{{.Status}}' @@ -71,6 +75,8 @@ load helpers is "${lines[1]}" "c2--Exited.*" "ps -a, second stopped container" is "${lines[2]}" "c3--Exited.*" "ps -a, third stopped container" is "${lines[3]}" "c4--Created.*" "ps -a, created container (unaffected)" + + run_podman rm $cid1 $cid2 $cid3 $cid4 } @test "podman stop print IDs or raw input" { @@ -194,6 +200,8 @@ load helpers # Exit code should be 137 as it was killed run_podman inspect --format '{{.State.ExitCode}}' stopme is "$output" "137" "Exit code of killed container" + + run_podman rm stopme } @test "podman stop -t 1 Generate warning" { diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats index 7027753cdd..cfd2adf009 100644 --- a/test/system/055-rm.bats +++ b/test/system/055-rm.bats @@ -111,16 +111,23 @@ load helpers is "$output" "Error: no container with ID or name \"bogus\" found: no such container" "Should print error" run_podman container rm --force bogus is "$output" "" "Should print no output" + + run_podman create --name test $IMAGE + run_podman container rm --force bogus test + assert "$output" = "test" "should delete test" + + run_podman ps -a -q + assert "$output" = "" "container should be removed" } function __run_healthcheck_container() { run_podman run -d --name $1 \ --health-cmd /bin/false \ --health-interval 1s \ - --health-retries 2 \ + --health-retries 1 \ --health-timeout 1s \ --health-on-failure=stop \ - --stop-timeout=2 \ + --stop-timeout=1 \ --health-start-period 0 \ --stop-signal SIGTERM \ $IMAGE sleep infinity @@ -149,7 +156,7 @@ function __run_healthcheck_container() { assert "$output" =~ "Error: cannot remove container $cid as it is .* - running or paused containers cannot be removed without force: container state improper" \ "Expected error message from podman rm" 
rm_failures=$((rm_failures + 1)) - sleep 1 + sleep 0.5 done # At this point, container should be gone diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats index fbc925a47c..64b2b3b8df 100644 --- a/test/system/060-mount.bats +++ b/test/system/060-mount.bats @@ -249,13 +249,10 @@ EOF reported_mountpoint=$(echo "$output" | awk '{print $2}') is "$reported_mountpoint" "$mount_path" "mountpoint reported by 'podman mount'" - # umount, and make sure files are gone + # umount, and make sure mountpoint no longer exists run_podman umount $external_cid - if [ -d "$mount_path" ]; then - # Under VFS, mountpoint always exists even despite umount - if [[ "$(podman_storage_driver)" != "vfs" ]]; then - die "'podman umount' did not umount $mount_path" - fi + if findmnt "$mount_path" >/dev/null ; then + die "'podman umount' did not umount $mount_path" fi buildah rm $external_cid } @@ -286,17 +283,18 @@ EOF is "$output" ".*$v1a" "podman images --inspect should include $v1a" is "$output" ".*$v1b" "podman images --inspect should include $v1b" - run_podman create --rm --mount type=glob,src=${PODMAN_TMPDIR}/v1\*,ro $IMAGE ls $vol1a $vol1b + run_podman create --mount type=glob,src=${PODMAN_TMPDIR}/v1\*,ro $IMAGE ls $vol1a $vol1b cid=$output - run_podman container inspect $output + run_podman container inspect $cid is "$output" ".*$vol1a" "podman images --inspect should include $vol1a" is "$output" ".*$vol1b" "podman images --inspect should include $vol1b" + run_podman rm $cid - run_podman 125 run --rm --mount source=${PODMAN_TMPDIR}/v2\*,type=bind,ro=false $IMAGE touch $vol2 + run_podman 125 run --rm --mount type=bind,source=${PODMAN_TMPDIR}/v2\*,ro=false $IMAGE touch $vol2 is "$output" "Error: must set volume destination" "Bind mounts require destination" - run_podman 125 run --rm --mount source=${PODMAN_TMPDIR}/v2\*,destination=/tmp/foobar, ro=false $IMAGE touch $vol2 - is "$output" "Error: invalid reference format" "Default mounts don not support globs" + run_podman 125 run --rm --mount type=bind,source=${PODMAN_TMPDIR}/v2\*,destination=/tmp/foobar,ro=false $IMAGE touch $vol2 + is "$output" "Error: statfs ${PODMAN_TMPDIR}/v2*: no such file or directory" "Bind mount should not interpret glob and must use as is" mkdir $PODMAN_TMPDIR/foo1 $PODMAN_TMPDIR/foo2 $PODMAN_TMPDIR/foo3 touch $PODMAN_TMPDIR/foo1/bar $PODMAN_TMPDIR/foo2/bar $PODMAN_TMPDIR/foo3/bar @@ -307,6 +305,7 @@ EOF is "$output" "bar1.*bar2.*bar3" "Should match multiple source files on single destination directory" } +# bats test_tags=distro-integration @test "podman mount noswap memory mounts" { # tmpfs+noswap new in kernel 6.x, mid-2023; likely not in RHEL for a while if ! 
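The health-check container helper above now uses a single retry and one-second intervals/timeouts purely so the rm test converges faster. A standalone sketch of the `--health-on-failure=stop` behaviour it depends on; the image name is a placeholder and the exact settling time may vary:

```bash
podman run -d --name hc-demo \
    --health-cmd /bin/false \
    --health-interval 1s \
    --health-retries 1 \
    --health-timeout 1s \
    --health-on-failure=stop \
    quay.io/libpod/busybox:latest sleep infinity

# After the first failed check, the on-failure action stops the container,
# so its state should move away from "running" within a few seconds.
sleep 3
podman inspect --format '{{.State.Status}}' hc-demo
```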
is_rootless; then diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats index 0a2b155374..191cc885b5 100644 --- a/test/system/065-cp.bats +++ b/test/system/065-cp.bats @@ -360,19 +360,25 @@ load helpers while read src dest dest_fullname description; do run_podman cp $srcdir/$src destrunning:$dest run_podman exec destrunning cat $dest_fullname/hostfile0 $dest_fullname/hostfile1 - is "$(echo $output)" "${randomcontent[*]}" "$description (cp -> ctr:$dest - RUNNING)" + is "${lines[*]}" "${randomcontent[*]}" "$description (cp -> ctr:$dest - RUNNING)" done < <(parse_table "$tests") run_podman rm -t 0 -f destrunning # CREATED container + run_podman create --name destcreated --workdir=/srv $cpimage sleep infinity while read src dest dest_fullname description; do - run_podman create --name destcreated --workdir=/srv $cpimage sleep infinity run_podman cp $srcdir/$src destcreated:$dest - run_podman start destcreated + # tests checks are done below + done < <(parse_table "$tests") + + # Now do the test checks, it is a bit ugly that we do this over two loops but this + # makes the test faster as we do not have to start/stop the container on every iteration. + run_podman start destcreated + while read src dest dest_fullname description; do run_podman exec destcreated cat $dest_fullname/hostfile0 $dest_fullname/hostfile1 - is "$(echo $output)" "${randomcontent[*]}" "$description (cp -> ctr:$dest - CREATED)" - run_podman rm -t 0 -f destcreated + is "${lines[*]}" "${randomcontent[*]}" "$description (cp -> ctr:$dest - CREATED)" done < <(parse_table "$tests") + run_podman rm -t 0 -f destcreated run_podman create --name destnotdir --workdir=/srv $cpimage sleep infinity run_podman 125 cp $srcdir destnotdir:/etc/os-release diff --git a/test/system/070-build.bats b/test/system/070-build.bats index 9299e0954a..cef56e7bf1 100644 --- a/test/system/070-build.bats +++ b/test/system/070-build.bats @@ -246,16 +246,23 @@ FROM $IMAGE RUN echo hi EOF - local count=30 + local count=10 for i in $(seq --format '%02g' 1 $count); do timeout --foreground -v --kill=10 60 \ - $PODMAN build -t i$i $PODMAN_TMPDIR &>/dev/null & + $PODMAN build -t i$i $PODMAN_TMPDIR &> $PODMAN_TMPDIR/log.$i & done # Wait for all background builds to complete. Note that this succeeds # even if some of the individual builds fail! Our actual test is below. wait + # For debugging, e.g., #21742 + for log in $PODMAN_TMPDIR/log.*;do + echo + echo $log ":" + cat $log + done + # Now delete all built images. If any image wasn't built, rmi will fail # and test will fail. 
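The concurrent-build test above now runs only ten background builds and redirects each one into its own log file, which gets dumped after `wait` to help debug flakes like #21742. The underlying background-jobs-then-collect pattern, reduced to a self-contained sketch (the `sleep` is a stand-in for the real `podman build` invocation):

```bash
#!/usr/bin/env bash
# Run N jobs in the background, each logging to its own file, then wait and
# dump every log. Note that `wait` succeeds even if individual jobs failed,
# so the real pass/fail check has to happen afterwards.
tmpdir=$(mktemp -d)
count=10

for i in $(seq --format '%02g' 1 $count); do
    sleep 0.1 &> "$tmpdir/log.$i" &    # stand-in for: podman build -t i$i ...
done

wait

for log in "$tmpdir"/log.*; do
    echo "== $log"
    cat "$log"
done
```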
run_podman rmi $(seq --format 'i%02g' 1 $count) @@ -353,11 +360,11 @@ LABEL $label_name=$label_value WORKDIR $workdir # Test for #7094 - chowning of invalid symlinks -RUN mkdir -p /a/b/c -RUN ln -s /no/such/nonesuch /a/b/c/badsymlink -RUN ln -s /bin/mydefaultcmd /a/b/c/goodsymlink -RUN touch /a/b/c/myfile -RUN chown -h 1:2 /a/b/c/badsymlink /a/b/c/goodsymlink && chown -h 4:5 /a/b/c/myfile +RUN mkdir -p /a/b/c && \ + ln -s /no/such/nonesuch /a/b/c/badsymlink && \ + ln -s /bin/mydefaultcmd /a/b/c/goodsymlink && \ + touch /a/b/c/myfile && \ + chown -h 1:2 /a/b/c/badsymlink /a/b/c/goodsymlink && chown -h 4:5 /a/b/c/myfile VOLUME /a/b/c # Test for environment passing and override @@ -367,17 +374,16 @@ ENV MYENV3 this-should-be-overridden-by-env-file ENV MYENV4 this-should-be-overridden-by-cmdline ENV http_proxy http-proxy-in-image ENV ftp_proxy ftp-proxy-in-image -ADD mycmd /bin/mydefaultcmd -RUN chmod 755 /bin/mydefaultcmd -RUN chown 2:3 /bin/mydefaultcmd - -RUN $CAT_SECRET -RUN echo explicit-build-arg=\$arg_explicit -RUN echo implicit-build-arg=\$arg_implicit +ADD mycmd /bin/mydefaultcmd +RUN chmod 755 /bin/mydefaultcmd && \ + chown 2:3 /bin/mydefaultcmd && \ + $CAT_SECRET && \ + echo explicit-build-arg=\$arg_explicit && \ + echo implicit-build-arg=\$arg_implicit && \ + cat /etc/resolv.conf CMD ["/bin/mydefaultcmd","$s_echo"] -RUN cat /etc/resolv.conf EOF # The goal is to test that a missing value will be inherited from @@ -451,7 +457,8 @@ EOF --env-file=$PODMAN_TMPDIR/env-file1 \ --env-file=$PODMAN_TMPDIR/env-file2 \ build_test \ - printenv http_proxy https_proxy ftp_proxy + sh -c "printenv http_proxy https_proxy ftp_proxy && + pwd" is "${lines[0]}" "http-proxy-in-env-file" "env-file overrides env" is "${lines[1]}" "https-proxy-in-env-file" "env-file sets proxy var" @@ -462,8 +469,7 @@ EOF fi # test that workdir is set for command-line commands also - run_podman run --rm build_test pwd - is "$output" "$workdir" "pwd command in container" + is "${lines[3]}" "$workdir" "pwd command in container" # Determine buildah version, so we can confirm it gets into Labels # Multiple --format options confirm command-line override (last one wins) @@ -487,10 +493,8 @@ Cmd[0] | /bin/mydefaultcmd Cmd[1] | $s_echo WorkingDir | $workdir Labels.$label_name | $label_value +Labels.\"io.buildah.version\" | $buildah_version " - # FIXME: 2021-02-24: Fixed in buildah #3036; re-enable this once podman - # vendors in a newer buildah! 
- # Labels.\"io.buildah.version\" | $buildah_version parse_table "$tests" | while read field expect; do actual=$(jq -r ".[0].Config.$field" <<<"$output") @@ -502,19 +506,18 @@ Labels.$label_name | $label_value # get here because any 'podman run' on a volume that had symlinks, # be they dangling or valid, would barf with # Error: chown /_data/symlink: ENOENT - run_podman run --rm build_test stat -c'%u:%g:%N' /a/b/c/badsymlink - is "$output" "1:2:'/a/b/c/badsymlink' -> '/no/such/nonesuch'" \ - "bad symlink to nonexistent file is chowned and preserved" + run_podman run --rm build_test \ + stat -c'%u:%g:%N' /a/b/c/badsymlink \ + /a/b/c/goodsymlink \ + /bin/mydefaultcmd \ + /a/b/c/myfile - run_podman run --rm build_test stat -c'%u:%g:%N' /a/b/c/goodsymlink - is "$output" "1:2:'/a/b/c/goodsymlink' -> '/bin/mydefaultcmd'" \ + is "${lines[0]}" "1:2:'/a/b/c/badsymlink' -> '/no/such/nonesuch'" \ + "bad symlink to nonexistent file is chowned and preserved" + is "${lines[1]}" "1:2:'/a/b/c/goodsymlink' -> '/bin/mydefaultcmd'" \ "good symlink to existing file is chowned and preserved" - - run_podman run --rm build_test stat -c'%u:%g' /bin/mydefaultcmd - is "$output" "2:3" "target of symlink is not chowned" - - run_podman run --rm build_test stat -c'%u:%g:%N' /a/b/c/myfile - is "$output" "4:5:/a/b/c/myfile" "file in volume is chowned" + is "${lines[2]}" "2:3:/bin/mydefaultcmd" "target of symlink is not chowned" + is "${lines[3]}" "4:5:/a/b/c/myfile" "file in volume is chowned" # Hey, as long as we have an image with lots of layers, let's # confirm that 'image tree' works as expected @@ -1107,6 +1110,12 @@ EOF run_podman build -t build_test $tmpdir/link } +@test "podman build --squash --squash-all should conflict" { + echo FROM scratch > $PODMAN_TMPDIR/Dockerfile + run_podman 125 build -t build_test --squash-all --squash $PODMAN_TMPDIR + is "$output" "Error: cannot specify --squash-all with --squash" "--squash and --sqaush-all should conflict" +} + @test "podman build --volumes-from conflict" { rand_content=$(random_string 50) @@ -1125,6 +1134,7 @@ EOF run_podman run --rm --volumes-from test_ctr $IMAGE echo $rand_content is "$output" "$rand_content" "No error should be thrown about volume in use" + run_podman rm -f -v -t0 test_ctr run_podman rmi -f build_test } @@ -1159,4 +1169,9 @@ function teardown() { basic_teardown } +@test "podman build --help defaults" { + run_podman build --help + assert "$output" =~ "--pull.*(default \"missing\")" "pull should default to missing" +} + # vim: filetype=sh diff --git a/test/system/075-exec.bats b/test/system/075-exec.bats index f24d34d534..3aed8ff2c0 100644 --- a/test/system/075-exec.bats +++ b/test/system/075-exec.bats @@ -33,6 +33,9 @@ load helpers run_podman 127 exec $cid /no/such/command is "$output" ".*such file or dir" "podman exec /no/such/command" + run_podman 125 exec $cid + is "$output" ".*must provide a non-empty command to start an exec session" "podman exec must include a command" + # Done. Tell the container to stop. 
# The '-d' is because container exit is racy: the exec process itself # could get caught and killed by cleanup, causing this step to exit 137 @@ -133,20 +136,6 @@ load helpers run_podman rm -t 0 -f $cid } -@test "podman exec --wait" { - skip_if_remote "test is meaningless over remote" - - # wait on bogus container - run_podman 125 exec --wait 5 "bogus_container" echo hello - assert "$output" = "Error: timed out waiting for container: bogus_container" - - run_podman create --name "wait_container" $IMAGE top - run_podman 255 exec --wait 5 "wait_container" echo hello - assert "$output" = "Error: can only create exec sessions on running containers: container state improper" - - run_podman rm -f wait_container -} - @test "podman run umask" { umask="0724" run_podman run --rm -q $IMAGE grep Umask /proc/self/status @@ -228,6 +217,8 @@ load helpers run_podman inspect --format "{{len .ExecIDs}}" $cid assert "$output" = "0" ".ExecIDs must be empty" + + run_podman rm -f -t0 $cid } # 'exec --preserve-fd' passes a list of additional file descriptors into the container @@ -250,6 +241,8 @@ load helpers assert "${lines[0]}" !~ [123][0-9] "/proc/self/fd must not contain 10-39" assert "${lines[1]}" = "fd9" "cat from fd 9" assert "${lines[2]}" = "$content" "cat from fd 40" + + run_podman rm -f -t0 $cid } # vim: filetype=sh diff --git a/test/system/090-events.bats b/test/system/090-events.bats index bac98897f2..739a35c46d 100644 --- a/test/system/090-events.bats +++ b/test/system/090-events.bats @@ -4,43 +4,37 @@ # load helpers +load helpers.network # bats test_tags=distro-integration -@test "events with a filter by label" { +@test "events with a filter by label and --no-trunc option" { cname=test-$(random_string 30 | tr A-Z a-z) labelname=$(random_string 10) labelvalue=$(random_string 15) - run_podman run --label $labelname=$labelvalue --name $cname --rm $IMAGE ls + before=$(date --iso-8601=seconds) + run_podman run -d --label $labelname=$labelvalue --name $cname --rm $IMAGE true + id="$output" - expect=".* container start [0-9a-f]\+ (image=$IMAGE, name=$cname,.* ${labelname}=${labelvalue}" - run_podman events --filter type=container -f container=$cname --filter label=${labelname}=${labelvalue} --filter event=start --stream=false + expect=".* container start $id (image=$IMAGE, name=$cname,.* ${labelname}=${labelvalue}" + run_podman events --since "$before" --filter type=container -f container=$cname --filter label=${labelname}=${labelvalue} --filter event=start --stream=false is "$output" "$expect" "filtering by container name and label" # Same thing, but without the container-name filter - run_podman system events -f type=container --filter label=${labelname}=${labelvalue} --filter event=start --stream=false + run_podman system events --since "$before" -f type=container --filter label=${labelname}=${labelvalue} --filter event=start --stream=false is "$output" "$expect" "filtering just by label" # Now filter just by container name, no label - run_podman events --filter type=container --filter container=$cname --filter event=start --stream=false + run_podman events --since "$before" --filter type=container --filter container=$cname --filter event=start --stream=false is "$output" "$expect" "filtering just by container" -} - -@test "truncate events" { - cname=test-$(random_string 30 | tr A-Z a-z) - - run_podman run -d --name=$cname --rm $IMAGE echo hi - id="$output" - - run_podman events --filter container=$cname --filter event=start --stream=false - is "$output" ".* $id " "filtering by container name full id" 
+ # check --no-trunc=false truncID=${id:0:12} - run_podman events --filter container=$cname --filter event=start --stream=false --no-trunc=false + run_podman events --since "$before" --filter container=$cname --filter event=start --stream=false --no-trunc=false is "$output" ".* $truncID " "filtering by container name trunc id" # --no-trunc does not affect --format; we always get the full ID - run_podman events --filter container=$cname --filter event=died --stream=false --format='{{.ID}}--{{.Image}}' --no-trunc=false + run_podman events --since "$before" --filter container=$cname --filter event=died --stream=false --format='{{.ID}}--{{.Image}}' --no-trunc=false assert "$output" = "${id}--${IMAGE}" } @@ -57,12 +51,15 @@ load helpers t0=$(date --iso-8601=seconds) tag=registry.com/$(random_string 10 | tr A-Z a-z) + bogus_image="localhost:$(random_free_port)/bogus" + # Force using the file backend since the journal backend is eating events # (see containers/podman/pull/10219#issuecomment-842325032). run_podman --events-backend=file push $IMAGE dir:$pushedDir run_podman --events-backend=file save $IMAGE -o $tarball run_podman --events-backend=file load -i $tarball run_podman --events-backend=file pull docker-archive:$tarball + run_podman 125 --events-backend=file pull --retry 0 $bogus_image run_podman --events-backend=file tag $IMAGE $tag run_podman --events-backend=file untag $IMAGE $tag run_podman --events-backend=file tag $IMAGE $tag @@ -74,6 +71,7 @@ load helpers .*image save $imageID $tarball .*image loadfromarchive $imageID $tarball .*image pull $imageID docker-archive:$tarball +.*image pull-error $bogus_image .*pinging container registry localhost.*connection refused .*image tag $imageID $tag .*image untag $imageID $tag:latest .*image tag $imageID $tag @@ -87,6 +85,7 @@ load helpers "save--$tarball" "loadfromarchive--$tarball" "pull--docker-archive:$tarball" + "pull-error--$bogus_image" "tag--$tag" "untag--$tag:latest" "tag--$tag" @@ -135,7 +134,7 @@ function _events_disjunctive_filters() { run_podman 125 --events-backend=file logs --follow test is "$output" "Error: using --follow with the journald --log-driver but without the journald --events-backend (file) is not supported" \ "Should fail with reasonable error message when events-backend and events-logger do not match" - + run_podman rm test } @test "events with disjunctive filters - default" { @@ -223,9 +222,9 @@ EOF # same amount of events. We checked the contents before. 
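The label-filter events test above captures a timestamp just before running the container and passes it to `--since`, so the assertions cannot pick up events left over from earlier tests. Roughly the same pattern outside BATS, as a sketch; the image and container name are placeholders:

```bash
before=$(date --iso-8601=seconds)

podman run --rm --name events-demo quay.io/libpod/busybox:latest true

# Only events emitted after $before are considered, which keeps earlier or
# parallel activity from leaking into the check.
podman events --since "$before" \
    --filter container=events-demo \
    --filter event=start \
    --stream=false
```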
CONTAINERS_CONF_OVERRIDE=$containersConf run_podman events --stream=false --since="2022-03-06T11:26:42.723667984+02:00" --format=json assert "${#lines[@]}" = 52 "Number of events returned" - is "${lines[0]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"Time\":\".*\",\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"begin\"}}" - is "${lines[-2]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"Time\":\".*\",\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"end\"}}" - is "${lines[-1]}" "{\"ID\":\"$ctrID\",\"Image\":\"$IMAGE\",\"Name\":\".*\",\"Status\":\"remove\",\"Time\":\".*\",\"Type\":\"container\",\"Attributes\":{.*}}" + is "${lines[0]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"time\":[0-9]\+,\"timeNano\":[0-9]\+,\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"begin\"}}" + is "${lines[-2]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"time\":[0-9]\+,\"timeNano\":[0-9]\+,\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"end\"}}" + is "${lines[-1]}" "{\"ID\":\"$ctrID\",\"Image\":\"$IMAGE\",\"Name\":\".*\",\"Status\":\"remove\",\"time\":[0-9]\+,\"timeNano\":[0-9]\+,\"Type\":\"container\",\"Attributes\":{.*}}" } @test "events log-file no duplicates" { @@ -292,10 +291,10 @@ EOF # Make sure that the JSON stream looks as expected. That means it has all # events and no duplicates. run cat $eventsJSON - is "${lines[0]}" "{\"Name\":\"busybox\",\"Status\":\"pull\",\"Time\":\"2022-04-06T11:26:42.7236679+02:00\",\"Type\":\"image\",\"Attributes\":null}" - is "${lines[99]}" "{\"Name\":\"busybox\",\"Status\":\"pull\",\"Time\":\"2022-04-06T11:26:42.723667999+02:00\",\"Type\":\"image\",\"Attributes\":null}" - is "${lines[100]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"Time\":\".*\",\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"end\"}}" - is "${lines[103]}" "{\"ID\":\"$ctrID\",\"Image\":\"$IMAGE\",\"Name\":\".*\",\"Status\":\"remove\",\"Time\":\".*\",\"Type\":\"container\",\"Attributes\":{.*}}" + is "${lines[0]}" "{\"Name\":\"busybox\",\"Status\":\"pull\",\"time\":1649237202,\"timeNano\":1649237202723[0-9]\+,\"Type\":\"image\",\"Attributes\":null}" + is "${lines[99]}" "{\"Name\":\"busybox\",\"Status\":\"pull\",\"time\":1649237202,\"timeNano\":1649237202723[0-9]\+,\"Type\":\"image\",\"Attributes\":null}" + is "${lines[100]}" "{\"Name\":\"$eventsFile\",\"Status\":\"log-rotation\",\"time\":[0-9]\+,\"timeNano\":[0-9]\+,\"Type\":\"system\",\"Attributes\":{\"io.podman.event.rotate\":\"end\"}}" + is "${lines[103]}" "{\"ID\":\"$ctrID\",\"Image\":\"$IMAGE\",\"Name\":\".*\",\"Status\":\"remove\",\"time\":[0-9]\+,\"timeNano\":[0-9]\+,\"Type\":\"container\",\"Attributes\":{.*}}" } # Prior to #15633, container labels would not appear in 'die' log events @@ -359,6 +358,8 @@ EOF --stream=false assert "$output" != ".*ConmonPidFile.*" assert "$output" != ".*EffectiveCaps.*" + + run_podman rm $cname } @test "events - container inspect data - journald" { @@ -400,3 +401,8 @@ EOF run_podman events --since=1m --stream=false --filter volume=${vname:0:5} assert "$output" = "$notrunc_results" } + +@test "events - invalid filter" { + run_podman 125 events --since="the dawn of time...ish" + assert "$output" =~ "failed to parse event filters" +} diff --git a/test/system/120-load.bats b/test/system/120-load.bats index 0372ac090e..5cfed4417b 100644 --- a/test/system/120-load.bats +++ b/test/system/120-load.bats @@ -15,6 +15,7 @@ function teardown() { # annotations and image digests may be 
different. See # https://github.com/containers/podman/discussions/17911 run_podman rmi -a -f + _prefetch $IMAGE basic_teardown } @@ -88,9 +89,11 @@ verify_iid_and_name() { skip "impossible due to pitfalls in our SSH implementation" fi - # See https://github.com/containers/podman/pull/21431 - if [[ -n "$PODMAN_IGNORE_CGROUPSV1_WARNING" ]]; then - skip "impossible to test due to pitfalls in our SSH implementation" + # FIXME: Broken on debian SID systemd 256 <= rc3 + # See https://github.com/containers/podman/pull/23020#issuecomment-2179284640 + OS_RELEASE_ID="${OS_RELEASE_ID:-$(source /etc/os-release; echo $ID)}" + if [[ "$OS_RELEASE_ID" == "debian" ]]; then + skip "broken warning about cgroup-manager=systemd and enabling linger" fi # The testing is the same whether we're root or rootless; all that diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats index 013edc3d5c..0a00a57812 100644 --- a/test/system/130-kill.bats +++ b/test/system/130-kill.bats @@ -7,35 +7,18 @@ load helpers # bats test_tags=distro-integration @test "podman kill - test signal handling in containers" { - - # Prepare for 'logs -f' - run_podman info --format '{{.Host.LogDriver}}' - log_driver=$output - run_podman info --format '{{.Host.EventLogger}}' - event_logger=$output - opt_log_driver= - if [ $log_driver = "journald" ] && [ $event_logger != "journald" ]; then - # Since PR#10431, 'logs -f' with journald driver is only supported with journald events backend. - # Set '--log driver' temporally because remote doesn't support '--events-backend'. - opt_log_driver="--log-driver k8s-file" - fi + local cname=c-$(random_string 10) + local fifo=${PODMAN_TMPDIR}/podman-kill-fifo.$(random_string 10) + mkfifo $fifo # Start a container that will handle all signals by emitting 'got: N' local -a signals=(1 2 3 4 5 6 8 10 12 13 14 15 16 20 21 22 23 24 25 26 64) - run_podman run -d ${opt_log_driver} $IMAGE sh -c \ + $PODMAN run --name $cname $IMAGE sh -c \ "for i in ${signals[*]}; do trap \"echo got: \$i\" \$i; done; echo READY; while ! test -e /stop; do sleep 0.1; done; - echo DONE" - cid="$output" - - # Run 'logs -f' on that container, but run it in the background with - # redirection to a named pipe from which we (foreground job) read - # and confirm that signals are received. We can't use run_podman here. - local fifo=${PODMAN_TMPDIR}/podman-kill-fifo.$(random_string 10) - mkfifo $fifo - $PODMAN logs -f $cid >$fifo $fifo $containersConf <&2; sleep inf + - echo a stdout; echo a stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done image: $IMAGE name: a - command: - sh - -c - - echo b stdout; echo b stderr 1>&2; sleep inf + - echo b stdout; echo b stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done image: $IMAGE name: b EOF @@ -452,8 +459,11 @@ $name stderr" "logs work with passthrough" # Kill the pod and make sure the service is not running. 
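The kube YAML in the systemd tests above replaces `sleep inf` with a trap loop so the containers exit promptly on SIGTERM instead of waiting out the stop timeout. The shell idiom by itself, as a sketch:

```bash
# Exit as soon as SIGTERM is delivered; the short sleep keeps the loop cheap
# while still giving the shell a chance to run the trap roughly ten times a
# second (the trap only fires once the current foreground command returns).
trap 'exit' SIGTERM
while :; do
    sleep 0.1
done
```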
run_podman pod kill test_pod for i in {0..20}; do + # echos are for debugging test flakes + echo "$_LOG_PROMPT systemctl is-active $service_name" run systemctl is-active $service_name - if [[ $output == "failed" ]]; then + echo "$output" + if [[ "$output" == "inactive" ]]; then break fi sleep 0.5 @@ -472,6 +482,7 @@ $name stderr" "logs work with passthrough" run_podman 1 container exists $service_container run_podman 1 pod exists test_pod run_podman rmi $(pause_image) + run_podman network rm podman-default-kube-network rm -f $UNIT_DIR/$unit_name } @@ -484,5 +495,23 @@ $name stderr" "logs work with passthrough" is "$output" ".*[DEPRECATED] command:" run_podman generate --help is "$output" ".*\[DEPRECATED\] Generate systemd units" + run_podman rm test +} + +@test "podman passes down the KillSignal and StopTimeout setting" { + ctr=systemd_test_$(random_string 5) + + run_podman run -d --name $ctr --stop-signal 5 --stop-timeout 7 --rm $IMAGE top + run_podman inspect $ctr --format '{{ .Id }}' + id="$output" + + run systemctl show -p TimeoutStopUSec "libpod-${id}.scope" + assert "$output" == "TimeoutStopUSec=7s" + + run systemctl show -p KillSignal "libpod-${id}.scope" + assert "$output" == "KillSignal=5" + + # Clean up + run_podman rm -t 0 -f $ctr } # vim: filetype=sh diff --git a/test/system/252-quadlet.bats b/test/system/252-quadlet.bats index 1dda5518d2..c09e49ed9b 100644 --- a/test/system/252-quadlet.bats +++ b/test/system/252-quadlet.bats @@ -143,21 +143,55 @@ function remove_secret() { run_podman secret rm $secret_name } +function wait_for_journal() { + local step=1 + local count=10 + local expect_str= + + while [ "$#" -gt 0 ]; do + case "$1" in + -s|--step) + step="$2" + shift 2 + ;; + -c|--count) + count="$2" + shift 2 + ;; + *) + expect_str="$1" + shift 1 + ;; + esac + done + + while [ "$count" -gt 0 ]; do + run journalctl "--since=$STARTED_TIME" --unit="$QUADLET_SERVICE_NAME" + if [[ "$output" =~ "$expect_str" ]]; then + return + fi + sleep "$step" + count=$(( count - 1 )) + done + die "Timed out waiting for '$expect_str' in journalctl output" +} + +# bats test_tags=distro-integration @test "quadlet - basic" { + # Network=none is to work around a Pasta bug, can be removed once a patched Pasta is available. + # Ref https://github.com/containers/podman/pull/21563#issuecomment-1965145324 local quadlet_file=$PODMAN_TMPDIR/basic_$(random_string).container cat > $quadlet_file < $quadlet_file <&2; top -d 10" EOF run_quadlet "$quadlet_file" @@ -246,6 +282,18 @@ EOF run_podman container inspect --format "{{.State.Status}}" customcontainername is "$output" "running" "container should be started by systemd and hence be running" + wait_for_journal "Started $QUADLET_SERVICE_NAME" + + run journalctl "--since=$STARTED_TIME" --unit="$QUADLET_SERVICE_NAME" + assert "$output" =~ "$token_out" "Output can be found with journalctl" + assert "$output" =~ "$token_err" "Error can be found with journalctl" + assert "$output" =~ "Starting $QUADLET_SERVICE_NAME" "Status information can be found with journalctl" + + # log priority 3 in journalctl is err. 
This is documented in syslog(3) + run journalctl "--since=$STARTED_TIME" --priority=3 --unit="$QUADLET_SERVICE_NAME" + assert "$output" =~ "$token_err" "Error can be found with journalctl --priority=3" + assert "$output" !~ "$token_out" "Output can not be found with journalctl --priority=3" + service_cleanup $QUADLET_SERVICE_NAME failed } @@ -319,6 +367,7 @@ EOF is "$output" "with space" service_cleanup $QUADLET_SERVICE_NAME inactive + run_podman volume rm $volume_name } # A quadlet container depends on a quadlet volume @@ -445,6 +494,7 @@ EOF is "$output" "with space" service_cleanup $QUADLET_SERVICE_NAME inactive + run_podman network rm $network_name } # A quadlet container depends on a quadlet network @@ -586,6 +636,7 @@ EOF service_cleanup $QUADLET_SERVICE_NAME inactive run_podman rmi $(pause_image) + run_podman network rm podman-default-kube-network } @test "quadlet kube - named network dependency" { @@ -955,6 +1006,7 @@ EOF done < <(parse_table "$exit_tests") run_podman rmi $(pause_image) + run_podman network rm podman-default-kube-network } @test "quadlet kube - Working Directory" { @@ -1015,6 +1067,7 @@ EOF service_cleanup $QUADLET_SERVICE_NAME inactive run_podman rmi $(pause_image) + run_podman network rm podman-default-kube-network } @test "quadlet - image files" { @@ -1246,6 +1299,7 @@ EOF service_cleanup $pod_service inactive run_podman volume rm $quadlet_kube_volume_name run_podman rmi --ignore $(pause_image) + run_podman network rm podman-default-kube-network } @test "quadlet - kube down force" { @@ -1325,6 +1379,7 @@ EOF # Volume should not exist run_podman 1 volume exists ${quadlet_kube_volume_name} run_podman rmi --ignore $(pause_image) + run_podman network rm podman-default-kube-network } @test "quadlet - image tag" { @@ -1412,6 +1467,7 @@ EOF service_cleanup $container_service failed run_podman image rm --ignore $image_for_test run_podman rmi --ignore $(pause_image) + run_podman volume rm $volume_name } @test "quadlet - pod simple" { @@ -1430,8 +1486,11 @@ EOF cat > $quadlet_container_file < $tmpdir/config.json + echo '{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","layers":[' > $tmpdir/manifest + local comma= + for blob in "$@" ; do + local sum=$(sha256sum $blob) + sum=${sum%% *} + local size=$(wc -c $blob) + size=${size%% *} + echo $comma '"sha256:'$sum'"' >> $tmpdir/config.json + echo $comma '{"digest":"sha256:'$sum'","size":'$size',"mediaType":"application/vnd.oci.image.layer.v1.tar"}' >> $tmpdir/manifest + comma=, + done + echo ']}}' >> $tmpdir/config.json + sum=$(sha256sum $tmpdir/config.json) + sum=${sum%% *} + size=$(wc -c $tmpdir/config.json) + size=${size%% *} + echo '],"config":{"digest":"sha256:'$sum'","size":'$size',"mediaType":"application/vnd.oci.image.config.v1+json"}}' >> $tmpdir/manifest + run_podman_testing create-image-data -i $imageID -k sha256:$sum -f $tmpdir/config.json + sum=$(sha256sum $tmpdir/manifest) + sum=${sum%% *} + run_podman_testing create-image-data -i $imageID -k manifest-sha256:$sum -f $tmpdir/manifest + run_podman_testing create-image-data -i $imageID -k manifest -f $tmpdir/manifest +} + +# vim: filetype=sh diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats index c7d71643b4..c985e1e65f 100644 --- a/test/system/410-selinux.bats +++ b/test/system/410-selinux.bats @@ -345,7 +345,7 @@ EOF run id -Z user=$(secon -u $output) role=$(secon -r $output) - CONTAINERS_CONF_OVERRIDE=$overrideConf run_podman run $IMAGE cat /proc/self/attr/current + CONTAINERS_CONF_OVERRIDE=$overrideConf run_podman 
run --rm $IMAGE cat /proc/self/attr/current level=$(secon -l $output) id -Z is "$output" "$user:$role:container_t:$level" "Confined label Correctly" diff --git a/test/system/420-cgroups.bats b/test/system/420-cgroups.bats index 3269f666cb..62cb6aa4ee 100644 --- a/test/system/420-cgroups.bats +++ b/test/system/420-cgroups.bats @@ -37,4 +37,20 @@ load helpers run_podman rm myc } +# bats test_tags=distro-integration +@test "podman run --cgroups=disabled keeps the current cgroup" { + skip_if_remote "podman-remote does not support --cgroups=disabled" + skip_if_rootless_cgroupsv1 + runtime=$(podman_runtime) + if [[ $runtime != "crun" ]]; then + skip "runtime is $runtime; --cgroups=disabled requires crun" + fi + + current_cgroup=$(cat /proc/self/cgroup) + + # --cgroupns=host is required to have full visibility of the cgroup path inside the container + run_podman run --cgroups=disabled --cgroupns=host --rm $IMAGE cat /proc/self/cgroup + is "$output" $current_cgroup "--cgroups=disabled must not change the current cgroup" +} + # vim: filetype=sh diff --git a/test/system/450-interactive.bats b/test/system/450-interactive.bats index 1eaa17ac2b..4536d74491 100644 --- a/test/system/450-interactive.bats +++ b/test/system/450-interactive.bats @@ -102,4 +102,24 @@ function teardown() { is "$output" "hello$CR" "-i=false: no warning" } + +@test "podman run -l passthrough-tty" { + skip_if_remote + + # Requires conmon 2.1.10 or greater + want=2.1.10 + run_podman info --format '{{.Host.Conmon.Path}}' + conmon_path="$output" + conmon_version=$($conmon_path --version | sed -ne 's/^.* version //p') + if ! printf "%s\n%s\n" "$want" "$conmon_version" | sort --check=quiet --version-sort; then + skip "need conmon >= $want; have $conmon_version" + fi + + run tty <$PODMAN_TEST_PTY + expected_tty="$output" + + run_podman run --rm -v/dev:/dev --log-driver=passthrough-tty $IMAGE tty <$PODMAN_TEST_PTY + is "$output" "$expected_tty" "passthrough-tty: tty matches" +} + # vim: filetype=sh diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats index ffb71993c7..330e16edcc 100644 --- a/test/system/500-networking.bats +++ b/test/system/500-networking.bats @@ -10,6 +10,8 @@ load helpers.network heading="NETWORK *ID *NAME *DRIVER" run_podman network ls assert "${lines[0]}" =~ "^$heading\$" "network ls header missing" + run_podman network list + assert "${lines[0]}" =~ "^$heading\$" "network list header missing" run_podman network ls --noheading assert "$output" !~ "$heading" "network ls --noheading shows header anyway" @@ -96,11 +98,14 @@ load helpers.network # force bridge networking also for rootless # this ensures that rootless + bridge + userns + ports works network_arg="--network bridge" - else - # Issue #9828 make sure a custom slir4netns cidr also works + elif has_slirp4netns; then + # Issue #9828 make sure a custom slirp4netns cidr also works network_arg="--network slirp4netns:cidr=$cidr" # slirp4netns interface ip is always .100 match="${cidr%.*}.100" + else + echo "# [skipping subtest of $cidr - slirp4netns unavailable]" >&3 + continue fi # Container will exit as soon as 'nc' receives input @@ -175,6 +180,8 @@ load helpers.network } @test "podman run with slirp4ns assigns correct addresses to /etc/hosts" { + has_slirp4netns || skip "slirp4netns unavailable" + CIDR="$(random_rfc1918_subnet)" IP=$(hostname -I | cut -f 1 -d " ") local conname=con-$(random_string 10) @@ -193,6 +200,8 @@ load helpers.network } @test "podman run with slirp4ns adds correct dns address to resolv.conf" { + has_slirp4netns 
|| skip "slirp4netns unavailable" + CIDR="$(random_rfc1918_subnet)" run_podman run --rm --network slirp4netns:cidr="${CIDR}.0/24" \ $IMAGE cat /etc/resolv.conf @@ -210,6 +219,8 @@ load helpers.network } @test "podman run with slirp4ns assigns correct ip address container" { + has_slirp4netns || skip "slirp4netns unavailable" + CIDR="$(random_rfc1918_subnet)" run_podman run --rm --network slirp4netns:cidr="${CIDR}.0/24" \ $IMAGE sh -c "ip address | grep ${CIDR}" @@ -297,10 +308,10 @@ load helpers.network $IMAGE /bin/busybox-extras httpd -f -p 80 cid=$output - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" - ip1="$output" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" - mac1="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + ip1="${lines[0]}" + mac1="${lines[1]}" # Verify http contents: curl from localhost run curl -s -S $SERVER/index.txt @@ -315,9 +326,9 @@ load helpers.network fi run iptables -t nat -F "$chain" - # check that we cannot curl (timeout after 5 sec) - run timeout 5 curl -s $SERVER/index.txt - assert $status -eq 124 "curl did not time out" + # check that we cannot curl (timeout after 1 sec) + run curl --max-time 1 -s $SERVER/index.txt + assert $status -eq 28 "curl did not time out" fi # reload the network to recreate the iptables rules @@ -325,10 +336,10 @@ load helpers.network is "$output" "$cid" "Output does match container ID" # check that we still have the same mac and ip - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" - is "$output" "$ip1" "IP address changed after podman network reload" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" - is "$output" "$mac1" "MAC address changed after podman network reload" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + is "${lines[0]}" "$ip1" "IP address changed after podman network reload" + is "${lines[1]}" "$mac1" "MAC address changed after podman network reload" # check that we can still curl run curl -s $SERVER/index.txt @@ -336,20 +347,19 @@ load helpers.network # create second network netname2=testnet-$(random_string 10) - # TODO add --ipv6 and uncomment the ipv6 checks below once cni plugins 1.0 is available on ubuntu CI VMs. - run_podman network create $netname2 + run_podman network create --ipv6 $netname2 is "$output" "$netname2" "output of 'network create'" # connect the container to the second network run_podman network connect $netname2 $cid - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").IPAddress}}" - ip2="$output" - #run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").GlobalIPv6Address}}" - #is "$output" "fd.*:.*" "IPv6 address should start with fd..." - #ipv6="$output" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").MacAddress}}" - mac2="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname2\").GlobalIPv6Address}} +{{(index .NetworkSettings.Networks \"$netname2\").MacAddress}}" + ip2="${lines[0]}" + is "${lines[1]}" "fd.*:.*" "IPv6 address should start with fd..." 
+ ipv6="${lines[1]}" + mac2="${lines[2]}" # make sure --all is working and that this # cmd also works if the iptables still exists @@ -357,16 +367,18 @@ load helpers.network is "$output" "$cid" "Output does match container ID" # check that both network keep there ip and mac - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" - is "$output" "$ip1" "IP address changed after podman network reload ($netname)" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" - is "$output" "$mac1" "MAC address changed after podman network reload ($netname)" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").IPAddress}}" - is "$output" "$ip2" "IP address changed after podman network reload ($netname2)" - #run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").GlobalIPv6Address}}" - #is "$output" "$ipv6" "IPv6 address changed after podman network reload ($netname2)" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname2\").MacAddress}}" - is "$output" "$mac2" "MAC address changed after podman network reload ($netname2)" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname\").MacAddress}} +{{(index .NetworkSettings.Networks \"$netname2\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname2\").GlobalIPv6Address}} +{{(index .NetworkSettings.Networks \"$netname2\").MacAddress}} +" + + is "${lines[0]}" "$ip1" "IP address changed after podman network reload ($netname)" + is "${lines[1]}" "$mac1" "MAC address changed after podman network reload ($netname)" + is "${lines[2]}" "$ip2" "IP address changed after podman network reload ($netname2)" + is "${lines[3]}" "$ipv6" "IPv6 address changed after podman network reload ($netname2)" + is "${lines[4]}" "$mac2" "MAC address changed after podman network reload ($netname2)" # check that we can still curl run curl -s -S $SERVER/index.txt @@ -414,13 +426,15 @@ load helpers.network skip "This test needs an ipv6 nameserver in $resolve_file" fi - # ipv4 slirp - run_podman run --rm --network slirp4netns:enable_ipv6=false $IMAGE cat /etc/resolv.conf - assert "$output" !~ "$ipv6_regex" "resolv.conf should not contain ipv6 nameserver" + if has_slirp4netns; then + # ipv4 slirp + run_podman run --rm --network slirp4netns:enable_ipv6=false $IMAGE cat /etc/resolv.conf + assert "$output" !~ "$ipv6_regex" "resolv.conf should not contain ipv6 nameserver" - # ipv6 slirp - run_podman run --rm --network slirp4netns:enable_ipv6=true $IMAGE cat /etc/resolv.conf - assert "$output" =~ "$ipv6_regex" "resolv.conf should contain ipv6 nameserver" + # ipv6 slirp + run_podman run --rm --network slirp4netns:enable_ipv6=true $IMAGE cat /etc/resolv.conf + assert "$output" =~ "$ipv6_regex" "resolv.conf should contain ipv6 nameserver" + fi # ipv4 cni local mysubnet=$(random_rfc1918_subnet) @@ -466,7 +480,7 @@ load helpers.network run_podman network create $netname2 is "$output" "$netname2" "output of 'network create'" - # First, run a container in background to ensure that the rootless cni ns + # First, run a container in background to ensure that the rootless netns # is not destroyed after network disconnect. 
run_podman run -d --network $netname $IMAGE top background_cid=$output @@ -486,14 +500,14 @@ load helpers.network run curl --retry 2 --retry-connrefused -s $SERVER/index.txt is "$output" "$random_1" "curl $SERVER/index.txt" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" - ip="$output" - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" - mac="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname\").MacAddress}} +{{(index .NetworkSettings.Networks \"$netname\").Aliases}}" + ip="${lines[0]}" + mac="${lines[1]}" # check network alias for container short id - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").Aliases}}" - is "$output" "[${cid:0:12} $hostname]" "short container id and hostname in network aliases" + is "${lines[2]}" "[${cid:0:12} $hostname]" "short container id and hostname in network aliases" # check /etc/hosts for our entry run_podman exec $cid cat /etc/hosts @@ -521,13 +535,12 @@ load helpers.network # check that we have a new ip and mac # if the ip is still the same this whole test turns into a nop - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" - new_ip="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}} +{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + new_ip="${lines[0]}" assert "$new_ip" != "$ip" \ "IP address did not change after podman network disconnect/connect" - - run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" - assert "$output" != "$mac" \ + assert "${lines[1]}" != "$mac" \ "MAC address did not change after podman network disconnect/connect" # check /etc/hosts for the new entry @@ -540,14 +553,6 @@ load helpers.network run_podman network connect $netname $background_cid is "$output" "" "(re)connect of container with no open ports" - # FIXME FIXME FIXME: #11825: bodhi tests are failing, remote+rootless only, - # with "dnsmasq: failed to create inotify". This error has never occurred - # in CI, and Ed has been unable to reproduce it on 1minutetip. This next - # line is a suggestion from Paul Holzinger for trying to shed light on - # the system context before the failure. This output will be invisible - # if the test passes. 
- for foo in /proc/\*/fd/*; do readlink -f $foo; done |grep '^/proc/.*inotify' |cut -d/ -f3 | xargs -I '{}' -- ps --no-headers -o '%p %U %a' -p '{}' |uniq -c |sort -n - # connect a second network run_podman network connect $netname2 $cid is "$output" "" "Output should be empty (no errors)" @@ -586,7 +591,11 @@ load helpers.network run_podman network create $netname is "$output" "$netname" "output of 'network create'" - for network in "slirp4netns" "$netname"; do + local -a networks=("$netname") + if has_slirp4netns; then + networks+=("slirp4netns") + fi + for network in "${networks[@]}"; do # Start container with the restart always policy run_podman run -d --name myweb -p "$HOST_PORT:80" \ --restart always \ @@ -713,19 +722,29 @@ nameserver 8.8.8.8" "nameserver order is correct" run_podman run --network host --dns 1.1.1.1 --rm $IMAGE cat /etc/resolv.conf assert "$output" =~ ".*nameserver 1\.1\.1\.1.*" \ "resolv.conf contains 1.1.1.1 nameserver" + + run_podman network rm -f $netname } @test "podman run port forward range" { # we run a long loop of tests lets run all combinations before bailing out defer-assertion-failures + local -a netmodes=("bridge") + # As of podman 5.0, slirp4netns is optional + if has_slirp4netns; then + netmodes+=("slirp4netns:port_handler=slirp4netns" "slirp4netns:port_handler=rootlesskit") + fi # pasta only works rootless - local pasta= if is_rootless; then - pasta=pasta + if has_pasta; then + netmodes+=("pasta") + else + echo "# WARNING: pasta unavailable!" >&3 + fi fi - for netmode in bridge slirp4netns:port_handler=slirp4netns slirp4netns:port_handler=rootlesskit $pasta; do + for netmode in "${netmodes[@]}"; do local range=$(random_free_port_range 3) # die() inside $(...) does not actually stop us. assert "$range" != "" "Could not find free port range" @@ -826,6 +845,12 @@ EOF is "$output" "Error: unable to find network with name or ID bogus: network not found" "Should print error" run_podman network rm -t -1 --force bogus is "$output" "" "Should print no output" + + run_podman network create testnet + run_podman network rm --force bogus testnet + assert "$output" = "testnet" "rm network" + run_podman network ls -q + assert "$output" = "podman" "only podman network listed" } @test "podman network rm --dns-option " { @@ -858,7 +883,7 @@ EOF cid=${output} run_podman inspect --format '{{ .NetworkSettings.Networks }}' $cid if is_rootless; then - is "$output" "map\[slirp4netns:.*" "NeworkSettings should contain one network named slirp4netns" + is "$output" "map\[pasta:.*" "NeworkSettings should contain one network named pasta" else is "$output" "map\[podman:.*" "NeworkSettings should contain one network named podman" fi @@ -901,7 +926,7 @@ EOF local subnet="$(random_rfc1918_subnet).0/29" run_podman network create --subnet $subnet $net1 local cname=con1-$(random_string 10) - local cname2=con2-$(random_string 10) + local cname2= local cname3= local netns_count= @@ -921,11 +946,14 @@ EOF run_podman run --rm --network $net1 $IMAGE true # And now because of all the fun we have to check the same with slirp4netns and pasta because - # that uses slighlty different code paths. Note this would dealock before the fix. + # that uses slightly different code paths. Note this would deadlock before the fix. 
# https://github.com/containers/podman/issues/21477 - run_podman 1 run --name $cname2 --network slirp4netns --restart on-failure:2 --userns keep-id $IMAGE false - run_podman inspect --format "{{.RestartCount}}" $cname2 - assert "$output" == "2" "RestartCount for failing container with slirp4netns" + if has_slirp4netns; then + cname2=con2-$(random_string 10) + run_podman 1 run --name $cname2 --network slirp4netns --restart on-failure:2 --userns keep-id $IMAGE false + run_podman inspect --format "{{.RestartCount}}" $cname2 + assert "$output" == "2" "RestartCount for failing container with slirp4netns" + fi if is_rootless; then # pasta can only run rootless @@ -954,4 +982,52 @@ EOF assert "$output" = $hostname "/etc/hostname with --uts=host --net=host must be equal to 'uname -n'" } +@test "podman network inspect running containers" { + local cname1=c1-$(random_string 10) + local cname2=c2-$(random_string 10) + local cname3=c3-$(random_string 10) + + local netname=net-$(random_string 10) + local subnet=$(random_rfc1918_subnet) + + run_podman network create --subnet "${subnet}.0/24" $netname + + run_podman network inspect --format "{{json .Containers}}" $netname + assert "$output" == "{}" "no containers on the network" + + run_podman create --name $cname1 --network $netname $IMAGE top + cid1="$output" + run_podman create --name $cname2 --network $netname $IMAGE top + cid2="$output" + + # containers should only be part of the output when they are running + run_podman network inspect --format "{{json .Containers}}" $netname + assert "$output" == "{}" "no running containers on the network" + + # start the containers to setup the network info + run_podman start $cname1 $cname2 + + # also run a third container on different network (should not be part of inspect then) + run_podman run -d --name $cname3 --network podman $IMAGE top + cid3="$output" + + # Map ordering is not deterministic so we check each container one by one + local expect="\{\"name\":\"$cname1\",\"interfaces\":\{\"eth0\":\{\"subnets\":\[\{\"ipnet\":\"${subnet}.2/24\"\,\"gateway\":\"${subnet}.1\"\}\],\"mac_address\":\"[0-9a-f]{2}:.*\"\}\}\}" + run_podman network inspect --format "{{json (index .Containers \"$cid1\")}}" $netname + assert "$output" =~ "$expect" "container 1 on the network" + + local expect="\{\"name\":\"$cname2\",\"interfaces\":\{\"eth0\":\{\"subnets\":\[\{\"ipnet\":\"${subnet}.3/24\"\,\"gateway\":\"${subnet}.1\"\}\],\"mac_address\":\"[0-9a-f]{2}:.*\"\}\}\}" + run_podman network inspect --format "{{json (index .Containers \"$cid2\")}}" $netname + assert "$output" =~ "$expect" "container 2 on the network" + + # container 3 should not be part of the inspect, index does not error if the key does not + # exists so just make sure the cid3 and cname3 are not in the json. 
+ run_podman network inspect --format "{{json .Containers}}" $netname + assert "$output" !~ "$cid3" "container 3 on the network (cid)" + assert "$output" !~ "$cname3" "container 3 on the network (name)" + + run_podman rm -f -t0 $cname1 $cname2 $cname3 + run_podman network rm $netname +} + # vim: filetype=sh diff --git a/test/system/505-networking-pasta.bats b/test/system/505-networking-pasta.bats index 71ab05b797..fb9c517bf8 100644 --- a/test/system/505-networking-pasta.bats +++ b/test/system/505-networking-pasta.bats @@ -18,21 +18,6 @@ function setup() { XFER_FILE="${PODMAN_TMPDIR}/pasta.bin" } -function default_ifname() { - local ip_ver="${1}" - - local expr='[.[] | select(.dst == "default").dev] | .[0]' - ip -j -"${ip_ver}" route show | jq -rM "${expr}" -} - -function default_addr() { - local ip_ver="${1}" - local ifname="${2:-$(default_ifname "${ip_ver}")}" - - local expr='.[0] | .addr_info[0].local' - ip -j -"${ip_ver}" addr show "${ifname}" | jq -rM "${expr}" -} - # _set_opt() - meta-helper for pasta_test_do. # # Sets an option, but panics if option is already set (e.g. UDP+TCP, IPv4/v6) @@ -245,7 +230,7 @@ function pasta_test_do() { done # and server, - run_podman run --net=pasta"${pasta_spec}" -p "${podman_spec}" "${IMAGE}" \ + run_podman run --rm --net=pasta"${pasta_spec}" -p "${podman_spec}" "${IMAGE}" \ sh -c 'for port in $(seq '"${xseq}"'); do '\ ' socat -u '"${bind}"' '"${recv}"' & '\ ' done; wait' @@ -259,7 +244,7 @@ function pasta_test_do() { @test "IPv4 default address assignment" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --net=pasta $IMAGE ip -j -4 address show + run_podman run --rm --net=pasta $IMAGE ip -j -4 address show local container_address="$(ipv4_get_addr_global "${output}")" local host_address="$(default_addr 4)" @@ -271,7 +256,7 @@ function pasta_test_do() { @test "IPv4 address assignment" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --net=pasta:-a,192.0.2.1 $IMAGE ip -j -4 address show + run_podman run --rm --net=pasta:-a,192.0.2.1 $IMAGE ip -j -4 address show local container_address="$(ipv4_get_addr_global "${output}")" @@ -283,7 +268,7 @@ function pasta_test_do() { skip_if_no_ipv4 "IPv4 not routable on the host" skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta:-6 $IMAGE ip -j -4 address show + run_podman run --rm --net=pasta:-6 $IMAGE ip -j -4 address show local container_address="$(ipv4_get_addr_global "${output}")" @@ -294,7 +279,7 @@ function pasta_test_do() { @test "IPv6 default address assignment" { skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta $IMAGE ip -j -6 address show + run_podman run --rm --net=pasta $IMAGE ip -j -6 address show local container_address="$(ipv6_get_addr_global "${output}")" local host_address="$(default_addr 6)" @@ -306,7 +291,7 @@ function pasta_test_do() { @test "IPv6 address assignment" { skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta:-a,2001:db8::1 $IMAGE ip -j -6 address show + run_podman run --rm --net=pasta:-a,2001:db8::1 $IMAGE ip -j -6 address show local container_address="$(ipv6_get_addr_global "${output}")" @@ -318,7 +303,7 @@ function pasta_test_do() { skip_if_no_ipv6 "IPv6 not routable on the host" skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --net=pasta:-4 $IMAGE ip -j -6 address show + run_podman run --rm --net=pasta:-4 $IMAGE ip -j -6 address show local container_address="$(ipv6_get_addr_global "${output}")" @@ -346,7 +331,7 @@ function pasta_test_do() { @test 
"IPv4 default route" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --net=pasta $IMAGE ip -j -4 route show + run_podman run --rm --net=pasta $IMAGE ip -j -4 route show local container_route="$(ipv4_get_route_default "${output}")" local host_route="$(ipv4_get_route_default)" @@ -358,7 +343,7 @@ function pasta_test_do() { @test "IPv4 default route assignment" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --net=pasta:-a,192.0.2.2,-g,192.0.2.1 $IMAGE \ + run_podman run --rm --net=pasta:-a,192.0.2.2,-g,192.0.2.1 $IMAGE \ ip -j -4 route show local container_route="$(ipv4_get_route_default "${output}")" @@ -370,7 +355,7 @@ function pasta_test_do() { @test "IPv6 default route" { skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta $IMAGE ip -j -6 route show + run_podman run --rm --net=pasta $IMAGE ip -j -6 route show local container_route="$(ipv6_get_route_default "${output}")" local host_route="$(ipv6_get_route_default)" @@ -382,7 +367,7 @@ function pasta_test_do() { @test "IPv6 default route assignment" { skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta:-a,2001:db8::2,-g,2001:db8::1 $IMAGE \ + run_podman run --rm --net=pasta:-a,2001:db8::2,-g,2001:db8::1 $IMAGE \ ip -j -6 route show local container_route="$(ipv6_get_route_default "${output}")" @@ -394,7 +379,7 @@ function pasta_test_do() { ### Interfaces ################################################################# @test "Default MTU" { - run_podman run --net=pasta $IMAGE ip -j link show + run_podman run --rm --net=pasta $IMAGE ip -j link show container_tap_mtu="$(ether_get_mtu "${output}")" @@ -403,7 +388,7 @@ function pasta_test_do() { } @test "MTU assignment" { - run_podman run --net=pasta:-m,1280 $IMAGE ip -j link show + run_podman run --rm --net=pasta:-m,1280 $IMAGE ip -j link show container_tap_mtu="$(ether_get_mtu "${output}")" @@ -412,7 +397,7 @@ function pasta_test_do() { } @test "Loopback interface state" { - run_podman run --net=pasta $IMAGE ip -j link show + run_podman run --rm --net=pasta $IMAGE ip -j link show local jq_expr='.[] | select(.link_type == "loopback").flags | '\ ' contains(["UP"])' @@ -428,7 +413,7 @@ function pasta_test_do() { @test "External resolver, IPv4" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman '?' run --net=pasta $IMAGE nslookup 127.0.0.1 + run_podman '?' run --rm --net=pasta $IMAGE nslookup 127.0.0.1 assert "$output" =~ "1.0.0.127.in-addr.arpa" \ "127.0.0.1 not resolved" @@ -437,7 +422,7 @@ function pasta_test_do() { @test "External resolver, IPv6" { skip_if_no_ipv6 "IPv6 not routable on the host" - run_podman run --net=pasta $IMAGE nslookup ::1 || : + run_podman '?' 
run --rm --net=pasta $IMAGE nslookup ::1 assert "$output" =~ "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" \ "::1 not resolved" @@ -446,9 +431,12 @@ function pasta_test_do() { @test "Local forwarder, IPv4" { skip_if_no_ipv4 "IPv4 not routable on the host" - run_podman run --dns 198.51.100.1 \ - --net=pasta:--dns-forward,198.51.100.1 $IMAGE nslookup 127.0.0.1 || : + # pasta is the default now so no need to set it + run_podman run --rm $IMAGE grep nameserver /etc/resolv.conf + assert "${lines[0]}" == "nameserver 169.254.0.1" "default dns forward server" + run_podman run --rm --net=pasta:--dns-forward,198.51.100.1 \ + $IMAGE nslookup 127.0.0.1 || : assert "$output" =~ "1.0.0.127.in-addr.arpa" "No answer from resolver" } @@ -755,7 +743,7 @@ function pasta_test_do() { @test "pasta(1) quits when the namespace is gone" { local pidfile="${PODMAN_TMPDIR}/pasta.pid" - run_podman run "--net=pasta:--pid,${pidfile}" $IMAGE true + run_podman run --rm "--net=pasta:--pid,${pidfile}" $IMAGE true sleep 1 ! ps -p $(cat "${pidfile}") && rm "${pidfile}" } @@ -764,7 +752,7 @@ function pasta_test_do() { @test "Unsupported protocol in port forwarding" { local port=$(random_free_port "" "" tcp) - run_podman 126 run --net=pasta -p "${port}:${port}/sctp" $IMAGE true + run_podman 126 run --rm --net=pasta -p "${port}:${port}/sctp" $IMAGE true is "$output" "Error: .*can't forward protocol: sctp" } @@ -781,11 +769,56 @@ EOF # 2023-06-29 DO NOT INCLUDE "--net=pasta" on this line! # This tests containers.conf:default_rootless_network_cmd (pr #19032) - CONTAINERS_CONF_OVERRIDE=$containersconf run_podman run $IMAGE ip link show myname + CONTAINERS_CONF_OVERRIDE=$containersconf run_podman run --rm $IMAGE ip link show myname assert "$output" =~ "$mac" "mac address is set on custom interface" # now, again but this time overwrite a option on the cli. mac2="aa:bb:cc:dd:ee:ff" - CONTAINERS_CONF_OVERRIDE=$containersconf run_podman run --net=pasta:--ns-mac-addr,"$mac2" $IMAGE ip link show myname + CONTAINERS_CONF_OVERRIDE=$containersconf run_podman run --rm \ + --net=pasta:--ns-mac-addr,"$mac2" $IMAGE ip link show myname assert "$output" =~ "$mac2" "mac address from cli is set on custom interface" } + +### Rootless unshare testing + +@test "Podman unshare --rootless-netns with Pasta" { + skip_if_remote "unshare is local-only" + + pasta_iface=$(default_ifname) + + # First let's force a setup error by making pasta be "false". + ln -s /usr/bin/false $PODMAN_TMPDIR/pasta + CONTAINERS_HELPER_BINARY_DIR="$PODMAN_TMPDIR" run_podman 125 unshare --rootless-netns ip addr + assert "$output" =~ "pasta failed with exit code 1" + + # Now this should recover from the previous error and setup the netns correctly. + run_podman unshare --rootless-netns ip addr + is "$output" ".*${pasta_iface}.*" +} + +# https://github.com/containers/podman/issues/22653 +@test "pasta/bridge and host.containers.internal" { + skip_if_no_ipv4 "IPv4 not routable on the host" + pasta_ip="$(default_addr 4)" + + for network in "pasta" "bridge"; do + # special exit code logic needed here, it is possible that there is no host.containers.internal + # when there is only one ip one the host and that one is used by pasta. + # As such we have to deal with both cases. + run_podman '?' 
run --rm --network=$network $IMAGE grep host.containers.internal /etc/hosts + if [ "$status" -eq 0 ]; then + assert "$output" !~ "$pasta_ip" "pasta host ip must not be assigned ($network)" + assert "$(hostname -I)" =~ "$(cut -f1 <<<$output)" "ip is one of the host ips ($network)" + elif [ "$status" -eq 1 ]; then + # if only pasta ip then we cannot have a host.containers.internal entry + # make sure this fact is actually the case + assert "$pasta_ip" == "$(hostname -I | tr -d '[:space:]')" "pasta ip must the only one one the host ($network)" + else + die "unexpected exit code '$status' from grep or podman ($network)" + fi + done + + host_ip=$(hostname -I | cut -f 1 -d " ") + run_podman run --rm --network=pasta:-a,169.254.0.2,-g,169.254.0.1,-n,24 $IMAGE grep host.containers.internal /etc/hosts + assert "$output" =~ "^$host_ip" "uses host first ip" +} diff --git a/test/system/520-checkpoint.bats b/test/system/520-checkpoint.bats index a43cf89a94..5a8493c62e 100644 --- a/test/system/520-checkpoint.bats +++ b/test/system/520-checkpoint.bats @@ -22,6 +22,14 @@ function setup() { skip "checkpoint does not work rootless" fi + # As of 2024-05, crun on Debian is not built with criu support: + # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008249 + runtime=$(podman_runtime) + run $runtime checkpoint --help + if [[ $status -ne 0 ]]; then + skip "runtime $runtime does not support checkpoint/restore" + fi + basic_setup } @@ -67,13 +75,19 @@ function teardown() { is "$output" "running:true:false:false" \ "State. Status:Running:Pause:Checkpointed" - # Pause briefly to let restarted container emit some output - sleep 0.3 - - # Get full logs, and make sure something changed - run_podman logs $cid - local nlines_after="${#lines[*]}" - assert $nlines_after -gt $nlines_before \ + # Re-fetch logs, and ensure that they continue growing. + # Allow a short while for container process to actually restart. 
+ local retries=10 + while [[ $retries -gt 0 ]]; do + run_podman logs $cid + local nlines_after="${#lines[*]}" + if [[ $nlines_after -gt $nlines_before ]]; then + break + fi + sleep 0.1 + retries=$((retries - 1)) + done + assert "$retries" -gt 0 \ "Container failed to output new lines after first restore" # Same thing again: test for https://github.com/containers/crun/issues/756 @@ -83,12 +97,18 @@ function teardown() { nlines_before="${#lines[*]}" run_podman container restore $cid - # Give container time to write new output; then confirm that something - # was emitted - sleep 0.3 - run_podman container logs $cid - nlines_after="${#lines[*]}" - assert $nlines_after -gt $nlines_before \ + # Same as above, confirm that we get new output + retries=10 + while [[ $retries -gt 0 ]]; do + run_podman logs $cid + local nlines_after="${#lines[*]}" + if [[ $nlines_after -gt $nlines_before ]]; then + break + fi + sleep 0.1 + retries=$((retries - 1)) + done + assert "$retries" -gt 0 \ "stdout went away after second restore (crun issue 756)" run_podman rm -t 0 -f $cid @@ -208,6 +228,8 @@ function teardown() { run_podman logs $cid trim=$(sed -z -e 's/[\r\n]\+//g' <<<"$output") is "$trim" "READY123123" "File lock restored" + + run_podman rm $cid } @test "podman checkpoint/restore ip and mac handling" { @@ -225,6 +247,9 @@ function teardown() { run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" mac1="$output" + run_podman exec $cid cat /etc/hosts /etc/resolv.conf + pre_hosts_resolv_conf_output="$output" + run_podman container checkpoint $cid is "$output" "$cid" run_podman container restore $cid @@ -236,6 +261,10 @@ function teardown() { run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" mac2="$output" + # Make sure hosts and resolv.conf are the same after restore (#22901) + run_podman exec $cid cat /etc/hosts /etc/resolv.conf + assert "$output" == "$pre_hosts_resolv_conf_output" "hosts/resolv.conf must be the same after checkpoint" + assert "$ip2" == "$ip1" "ip after restore should match" assert "$mac2" == "$mac1" "mac after restore should match" diff --git a/test/system/550-pause-process.bats b/test/system/550-pause-process.bats index 4e99d85f26..eee770f27a 100644 --- a/test/system/550-pause-process.bats +++ b/test/system/550-pause-process.bats @@ -4,8 +4,17 @@ # load helpers +load helpers.registry load helpers.sig-proxy +function setup_file() { + # We have to stop the background registry here. These tests kill the podman pause + # process which means commands after that are in a new one and when the cleanup + # later tries to stop the registry container it will be in the wrong ns and can fail. + # https://github.com/containers/podman/pull/21563#issuecomment-1960047648 + stop_registry +} + function _check_pause_process() { pause_pid= if [[ -z "$pause_pid_file" ]]; then @@ -85,7 +94,9 @@ function _check_pause_process() { run_podman system migrate # We're forced to use $PODMAN because run_podman cannot be backgrounded - $PODMAN run -i --name c_run $IMAGE sh -c "$SLEEPLOOP" & + # Also special logic to set a different argv0 to make sure the reexec still works: + # https://github.com/containers/podman/issues/22672 + bash -c "exec -a argv0-podman $PODMAN run -i --name c_run $IMAGE sh -c '$SLEEPLOOP'" & local kidpid=$! 
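# The "exec -a argv0-podman" trick above starts podman with a different
# argv[0], which is what exercises the re-exec path from issue 22672. A
# tiny stand-alone illustration of the bash behaviour being relied on
# (the sleep command and names are placeholders, not part of the test):
bash -c 'exec -a argv0-sleep sleep 30' &
sleep 0.5                       # give the child a moment to exec
tr '\0' ' ' </proc/$!/cmdline   # expected to print "argv0-sleep 30"
kill $!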
_test_sigproxy c_run $kidpid diff --git a/test/system/600-completion.bats b/test/system/600-completion.bats index 6a577ffc12..2f5aa0b9c7 100644 --- a/test/system/600-completion.bats +++ b/test/system/600-completion.bats @@ -213,8 +213,8 @@ function check_shell_completion() { i=$(($i + 1)) # If the argument ends with ...] than we accept 0...n args - # Loop three times to make sure we are not only completing the first arg - if [[ ! ${arg} =~ "..." ]] || [[ i -gt 3 ]]; then + # Loop two times to make sure we are not only completing the first arg + if [[ ! ${arg} =~ "..." ]] || [[ i -gt 1 ]]; then break fi diff --git a/test/system/700-play.bats b/test/system/700-play.bats index f06859848a..7c26a4fa8e 100644 --- a/test/system/700-play.bats +++ b/test/system/700-play.bats @@ -17,6 +17,7 @@ function teardown() { run_podman rmi $id fi done <<<"$output" + run_podman network rm -f podman-default-kube-network basic_teardown } @@ -31,8 +32,7 @@ metadata: spec: containers: - command: - - sleep - - \"100\" + - /home/podman/pause env: - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin @@ -255,11 +255,13 @@ EOF run_podman stop -a -t 0 run_podman pod rm -t 0 -f test_pod - run_podman kube play --network slirp4netns:port_handler=slirp4netns $PODMAN_TMPDIR/test.yaml - run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}" - infraID="$output" - run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID - is "$output" "slirp4netns" "network mode slirp4netns is set for the container" + if has_slirp4netns; then + run_podman kube play --network slirp4netns:port_handler=slirp4netns $PODMAN_TMPDIR/test.yaml + run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}" + infraID="$output" + run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID + is "$output" "slirp4netns" "network mode slirp4netns is set for the container" + fi run_podman stop -a -t 0 run_podman pod rm -t 0 -f test_pod @@ -396,6 +398,8 @@ _EOF _write_test_yaml command=id image=quay.io/libpod/userimage run_podman 125 play kube --build --start=false $PODMAN_TMPDIR/test.yaml + assert "$output" =~ "initializing source docker://quay.io/libpod/userimage:latest: reading manifest latest in " + run_podman play kube --replace --context-dir=$PODMAN_TMPDIR --build --start=false $PODMAN_TMPDIR/test.yaml run_podman inspect --format "{{ .Config.User }}" test_pod-test is "$output" bin "expect container within pod to run as the bin user" @@ -455,32 +459,7 @@ _EOF run_podman pod rm -t 0 -f test_pod } -@test "podman play --annotation > Max" { - TESTDIR=$PODMAN_TMPDIR/testdir - RANDOMSTRING=$(random_string 65) - mkdir -p $TESTDIR - echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml - run_podman 125 play kube --annotation "name=$RANDOMSTRING" $PODMAN_TMPDIR/test.yaml - assert "$output" =~ "annotation exceeds maximum size, 63, of kubernetes annotation:" "Expected to fail with Length greater than 63" -} - -@test "podman play --no-trunc --annotation > Max" { - TESTDIR=$PODMAN_TMPDIR/testdir - RANDOMSTRING=$(random_string 65) - mkdir -p $TESTDIR - echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml - run_podman play kube --no-trunc --annotation "name=$RANDOMSTRING" $PODMAN_TMPDIR/test.yaml -} - -@test "podman play Yaml with annotation > Max" { - RANDOMSTRING=$(random_string 65) - - _write_test_yaml "annotations=test: ${RANDOMSTRING}" command=id - run_podman 125 play kube - < $PODMAN_TMPDIR/test.yaml - assert "$output" =~ "annotation 
\"test\"=\"$RANDOMSTRING\" value length exceeds Kubernetes max 63" "Expected to fail with annotation length greater than 63" -} - -@test "podman play Yaml --no-trunc with annotation > Max" { +@test "podman play Yaml deprecated --no-trunc annotation" { RANDOMSTRING=$(random_string 65) _write_test_yaml "annotations=test: ${RANDOMSTRING}" command=id @@ -511,15 +490,19 @@ _EOF TESTDIR=$PODMAN_TMPDIR/testdir mkdir -p $TESTDIR echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml + echo READY > $PODMAN_TMPDIR/ready HOST_PORT=$(random_free_port) SERVER=http://127.0.0.1:$HOST_PORT run_podman run -d --name myyaml -p "$HOST_PORT:80" \ -v $PODMAN_TMPDIR/test.yaml:/var/www/testpod.yaml:Z \ + -v $PODMAN_TMPDIR/ready:/var/www/ready:Z \ -w /var/www \ $IMAGE /bin/busybox-extras httpd -f -p 80 + wait_for_port 127.0.0.1 $HOST_PORT + wait_for_command_output "curl -s -S $SERVER/ready" "READY" run_podman kube play $SERVER/testpod.yaml run_podman inspect test_pod-test --format "{{.State.Running}}" @@ -569,7 +552,7 @@ EOF run_podman kube play $PODMAN_TMPDIR/test.yaml run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}" - assert "$output" = "map[$HOST_PORT/tcp:[{ $HOST_PORT}]]" + assert "$output" = "map[$HOST_PORT/tcp:[{0.0.0.0 $HOST_PORT}]]" run_podman kube down $PODMAN_TMPDIR/test.yaml run_podman pod rm -a -f @@ -652,21 +635,33 @@ EOF # kube play --wait=true, where we clear up the created containers, pods, and volumes when a kill or sigterm is triggered @test "podman kube play --wait with siginterrupt" { cname=c$(random_string 15) - fname="/tmp/play_kube_wait_$(random_string 6).yaml" - run_podman container create --name $cname $IMAGE top - run_podman kube generate -f $fname $cname - - # delete the container we generated from - run_podman rm -f $cname + fname="$PODMAN_TMPDIR/play_kube_wait_$(random_string 6).yaml" + echo " +apiVersion: v1 +kind: Pod +metadata: + labels: + app: test + name: test_pod +spec: + restartPolicy: Never + containers: + - name: server + image: $IMAGE + command: + - top +" > $fname # force a timeout to happen so that the kube play command is killed # and expect the timeout code 124 to happen so that we can clean up local t0=$SECONDS - PODMAN_TIMEOUT=15 run_podman 124 kube play --wait $fname + PODMAN_TIMEOUT=2 run_podman 124 kube play --wait $fname local t1=$SECONDS local delta_t=$((t1 - t0)) - assert $delta_t -le 20 \ - "podman kube play did not get killed within 10 seconds" + assert $delta_t -le 4 \ + "podman kube play did not get killed within 3 seconds" + # Make sure we actually got SIGTERM and podman printed its message. 
+ assert "$output" =~ "Cleaning up containers, pods, and volumes" "kube play printed sigterm message" # there should be no containers running or created run_podman ps -aq @@ -675,7 +670,7 @@ EOF } @test "podman kube play --wait - wait for pod to exit" { - fname="/tmp/play_kube_wait_$(random_string 6).yaml" + fname="$PODMAN_TMPDIR/play_kube_wait_$(random_string 6).yaml" echo " apiVersion: v1 kind: Pod @@ -755,8 +750,18 @@ spec: run_podman kube play --configmap=$configmap_file $pod_file run_podman wait test_pod-server - run_podman logs test_pod-server - is "$output" "foo:bar" + + # systemd logs are unreliable; we may need to retry a few times + # https://github.com/systemd/systemd/issues/28650 + local retries=10 + while [[ $retries -gt 0 ]]; do + run_podman logs test_pod-server + test -n "$output" && break + sleep 0.1 + retries=$((retries - 1)) + done + assert "$retries" -gt 0 "Timed out waiting for podman logs" + assert "$output" = "foo:bar" "output from podman logs" run_podman kube down $pod_file } @@ -768,7 +773,8 @@ spec: bogus=$PODMAN_TMPDIR/bogus-authfile run_podman 125 kube play --authfile=$bogus - < $PODMAN_TMPDIR/test.yaml - is "$output" "Error: credential file is not accessible: stat $bogus: no such file or directory" "$command should fail with not such file" + is "$output" "Error: credential file is not accessible: faccessat $bogus: no such file or directory" \ + "$command should fail with not such file" } @test "podman kube play with umask from containers.conf" { @@ -790,9 +796,16 @@ EOF CONTAINERS_CONF_OVERRIDE="$containersConf" run_podman kube play $YAML run_podman container inspect --format '{{ .Config.Umask }}' $ctrInPod is "${output}" "0472" - # Confirm that umask actually takes effect - run_podman logs $ctrInPod - is "$output" "204" "stat() on created file" + # Confirm that umask actually takes effect. Might take a second or so. 
+ local retries=10 + while [[ $retries -gt 0 ]]; do + run_podman logs $ctrInPod + test -n "$output" && break + sleep 0.1 + retries=$((retries - 1)) + done + assert "$retries" -gt 0 "Timed out waiting for container output" + assert "$output" = "204" "stat() on created file" run_podman kube down $YAML run_podman pod rm -a @@ -800,15 +813,12 @@ EOF } @test "podman kube generate tmpfs on /tmp" { - KUBE=$PODMAN_TMPDIR/kube.yaml - run_podman create --name test $IMAGE sleep 100 - run_podman kube generate test -f $KUBE - run_podman kube play $KUBE - run_podman exec test-pod-test sh -c "mount | grep /tmp" + local yaml=$PODMAN_TMPDIR/test.yaml + _write_test_yaml command=/home/podman/pause + run_podman kube play $yaml + run_podman exec test_pod-test sh -c "mount | grep /tmp" assert "$output" !~ "noexec" "mounts on /tmp should not be noexec" - run_podman kube down $KUBE - run_podman pod rm -a -f -t 0 - run_podman rm -a -f -t 0 + run_podman kube down $yaml } @test "podman kube play - pull policy" { @@ -882,8 +892,7 @@ spec: done assert $output == "2-healthy" "After 3 seconds" - run_podman kube down $fname - run_podman pod rm -a + run_podman pod rm -fa -t0 run_podman rm -a } @@ -935,8 +944,7 @@ spec: done assert $output == "2-unhealthy" "After 3 seconds" - run_podman kube down $fname - run_podman pod rm -a + run_podman pod rm -fa -t0 run_podman rm -a } @@ -982,3 +990,48 @@ _EOF run_podman pod rm -t 0 -f test_pod run_podman rmi -f userimage:latest $from_image } + +@test "podman play with automount volume" { + cat >$PODMAN_TMPDIR/Containerfile < $fname + + run_podman kube play --annotation "io.podman.annotations.kube.image.volumes.mount/testctr=automount_test" $fname + + run_podman run --rm automount_test ls /test1 + run_out_test1="$output" + run_podman exec test_pod-testctr ls /test1 + assert "$output" = "$run_out_test1" "matching ls run/exec volume path test1" + + run_podman run --rm automount_test ls /test2 + run_out_test2="$output" + run_podman exec test_pod-testctr ls /test2 + assert "$output" = "$run_out_test2" "matching ls run/exec volume path test2" + + run_podman rm -f -t 0 -a + run_podman rmi automount_test +} diff --git a/test/system/710-kube.bats b/test/system/710-kube.bats index b178c2950f..cb49260a6a 100644 --- a/test/system/710-kube.bats +++ b/test/system/710-kube.bats @@ -88,6 +88,7 @@ status | = | null run_podman pod rm -a run_podman rm -a run_podman rmi $(pause_image) + run_podman network rm podman-default-kube-network } @test "podman kube generate - pod" { diff --git a/test/system/760-system-renumber.bats b/test/system/760-system-renumber.bats new file mode 100644 index 0000000000..d6763d065b --- /dev/null +++ b/test/system/760-system-renumber.bats @@ -0,0 +1,23 @@ +#!/usr/bin/env bats -*- bats -*- +# +# tests for podman system renumber +# + +load helpers + +function setup() { + basic_setup + + skip_if_remote "podman system renumber is not available remote" +} + +@test "podman system renumber - Basic test with a volume" { + run_podman volume create test + assert "$output" == "test" "podman volume create output" + run_podman system renumber + assert "$output" == "" "podman system renumber output" + run_podman volume rm test + assert "$output" == "test" "podman volume rm output" +} + +# vim: filetype=sh diff --git a/test/system/800-config.bats b/test/system/800-config.bats index 1efde8d4d4..2ff116ae27 100644 --- a/test/system/800-config.bats +++ b/test/system/800-config.bats @@ -107,7 +107,7 @@ See 'podman create --help'" "--module must be specified before the command" # Nonexistent 
module path with comma nonesuch=${PODMAN_TMPDIR}/nonexistent,withcomma run_podman 1 --module=$nonesuch sdfsdfdsf - is "$output" "Failed to obtain podman configuration: could not resolve module \"$nonesuch\": stat $nonesuch: no such file or directory" \ + is "$output" "Failed to obtain podman configuration: could not resolve module \"$nonesuch\": faccessat $nonesuch: no such file or directory" \ "--module=ENOENT" } @@ -188,7 +188,7 @@ EOF XDG_CONFIG_HOME=$fake_home run_podman 1 --module=$nonesuch invalid-command expect="Failed to obtain podman configuration: could not resolve module \"$nonesuch\": 3 errors occurred:" for dir in $fake_home /etc /usr/share;do - expect+=$'\n\t'"* stat $dir/containers/containers.conf.modules/$nonesuch: no such file or directory" + expect+=$'\n\t'"* faccessat $dir/containers/containers.conf.modules/$nonesuch: no such file or directory" done is "$output" "$expect" "--module=ENOENT : error message" } diff --git a/test/system/850-compose.bats b/test/system/850-compose.bats index 91e6afc267..8d967ad2d8 100644 --- a/test/system/850-compose.bats +++ b/test/system/850-compose.bats @@ -55,10 +55,20 @@ EOF CONTAINERS_CONF_OVERRIDE=$compose_conf run_podman 42 compose fail # Make sure the three env variables are set (and parsed) + op='=~' + url=".*/podman.sock" + # if we run remote with --url check the url arg is honored + if [[ "$PODMAN" =~ "--url" ]]; then + # get the url from the podman string + url="${PODMAN##*--url }" + url="${url%% *}" + op='=' + fi + # podman-remote test might run with --url so unset this because the socket will be used otherwise CONTAINERS_CONF_OVERRIDE=$compose_conf run_podman compose env - is "${lines[0]}" ".*/podman.sock" - is "${lines[1]}" "0" - is "${lines[2]}" "" + assert "${lines[0]}" $op "$url" "line 1 of 3 (DOCKER_HOST)" + assert "${lines[1]}" = "0" "line 2 of 3 (DOCKER_BUILDKIT)" + assert "${lines[2]}" = "" "line 3 of 3 (DOCKER_CONFIG)" DOCKER_HOST="$random_data" DOCKER_CONFIG="$random_data" CONTAINERS_CONF_OVERRIDE=$compose_conf run_podman compose env is "${lines[0]}" "$random_data" diff --git a/test/system/README.md b/test/system/README.md index c4d73d2df5..8472f00faf 100644 --- a/test/system/README.md +++ b/test/system/README.md @@ -46,13 +46,9 @@ without having to wait for the entire test suite. Running tests ============= -To run the tests locally in your sandbox, you can use one of these methods: -* make;PODMAN=./bin/podman bats ./test/system/070-build.bats # runs just the specified test -* make;PODMAN=./bin/podman bats ./test/system # runs all -* make;PODMAN=./bin/podman NETWORK_BACKEND=netavark bats ./test/system # Assert & enable netavark testing +To run the tests locally in your sandbox using `hack/bats` is recommend, check `hack/bats --help` for info about usage. -To test as root: -* $ PODMAN=./bin/podman sudo --preserve-env=PODMAN bats test/system +To run the entire suite use `make localsystem` or `make remotesystem` for podman-remote testing. Analyzing test failures ======================= diff --git a/test/system/TODO.md b/test/system/TODO.md deleted file mode 100644 index 55e7601d16..0000000000 --- a/test/system/TODO.md +++ /dev/null @@ -1,105 +0,0 @@ -![PODMAN logo](https://raw.githubusercontent.com/containers/common/main/logos/podman-logo-full-vert.png) - -# Overview - -System tests exercise Podman in the context of a complete, composed environment from -distribution packages. It should match as closely as possible to how an end-user -would experience a fresh-install. 
Dependencies on external configuration and resources -must be kept minimal, and the tests must be generic and vendor-neutral. - -The system-tests must execute cleanly on all tested platforms. They may optionally -be executed during continuous-integration testing of code-changes, after all other -testing completes successfully. For a list of tested platforms, please see [the -CI configuration file.](../../.cirrus.yml) - - -# Execution - -When working from a clone of [the libpod repository](https://github.com/containers/podman), -the main entry-point for humans and automation is `make localsystem`. When operating -from a packaged version of the system-tests, the entry-point may vary as appropriate. -Running the packaged system-tests assumes the version of Podman matches the test -version, and all standard dependencies are installed. - - -# Test Design and overview - -System-tests should be high-level and user work-flow oriented. For example, consider -how multiple Podman invocations would be used together by an end-user. The set of -related commands should be considered a single test. If one or more intermediate -commands fail, the test could still pass if the end-result is still achieved. - - -# *TODO*: List of needed System-tests - -***Note***: Common operations (like `rm` and `rmi` for cleanup/reset) -have been omitted as they are verified by repeated implied use. - -- [ ] pull, build, run, attach, commit, diff, inspect - - - Pull existing image from registry - - Build new image FROM explicitly pulled image - - Run built container in detached mode - - Attach to running container, execute command to modify storage. - - Commit running container to new image w/ changed ENV VAR - - Verify attach + commit using diff - - verify changed ENV VAR with inspect - -- [ ] Implied pull, create, start, exec, log, stop, wait, rm - - - Create non-existing local image - - start stopped container - - exec simple command in running container - - verify exec result with log - - wait on running container - - stop running container with 2 second timeout - - verify wait in 4 seconds or less - - verify stopped by rm **without** --force - -- [ ] Implied pull, build, export, modify, import, tag, run, kill - - - Build from Dockerfile FROM non-existing local image - - Export built container as tarball - - Modify tarball contents - - Import tarball - - Tag imported image - - Run imported image to confirm tarball modification, block on non-special signal - - Kill can send non-TERM/KILL signal to container to exit - - Confirm exit within timeout - -- [ ] Container runlabel, exists, checkpoint, exists, restore, stop, prune - - - Using pre-existing remote image, start it with 'podman container runlabel --pull' - - Run a named container that exits immediately - - Confirm 'container exists' zero exit (both containers) - - Checkpoint the running container - - Confirm 'container exists' non-zero exit (runlabel container) - - Confirm 'container exists' zero exit (named container) - - Run 'container restore' - - Confirm 'container exists' zero exit (both containers) - - Stop container - - Run 'container prune' - - Confirm `podman ps -a` lists no containers - - -# TODO: List of commands to be combined into additional workflows above. 
- -- podman-remote (workflow TBD) -- history -- image -- load -- mount -- pause -- pod -- port -- login, push, & logout (difficult, save for last) -- restart -- save -- search -- stats -- top -- umount, unmount -- unpause -- volume -- `--namespace` -- `--storage-driver` diff --git a/test/system/helpers.bash b/test/system/helpers.bash index a27ad21f6c..fedb9d19aa 100644 --- a/test/system/helpers.bash +++ b/test/system/helpers.bash @@ -4,6 +4,9 @@ PODMAN=${PODMAN:-podman} QUADLET=${QUADLET:-/usr/libexec/podman/quadlet} +# Podman testing helper used in 331-system-check tests +PODMAN_TESTING=${PODMAN_TESTING:-$(dirname ${BASH_SOURCE})/../../bin/podman-testing} + # crun or runc, unlikely to change. Cache, because it's expensive to determine. PODMAN_RUNTIME= @@ -138,60 +141,6 @@ function skopeo() { # Setup helper: establish a test environment with exactly the images needed function basic_setup() { - # Clean up all containers - run_podman rm -t 0 --all --force --ignore - - # ...including external (buildah) ones - run_podman ps --all --external --format '{{.ID}} {{.Names}}' - for line in "${lines[@]}"; do - set $line - echo "# setup(): removing stray external container $1 ($2)" >&3 - run_podman '?' rm -f $1 - if [[ $status -ne 0 ]]; then - echo "# [setup] $_LOG_PROMPT podman rm -f $1" >&3 - for errline in "${lines[@]}"; do - echo "# $errline" >&3 - done - fi - done - - # Clean up all images except those desired. - # 2023-06-26 REMINDER: it is tempting to think that this is clunky, - # wouldn't it be safer/cleaner to just 'rmi -a' then '_prefetch $IMAGE'? - # Yes, but it's also tremendously slower: 29m for a CI run, to 39m. - # Image loads are slow. - found_needed_image= - run_podman '?' images --all --format '{{.Repository}}:{{.Tag}} {{.ID}}' - - for line in "${lines[@]}"; do - set $line - if [[ "$1" == "$PODMAN_TEST_IMAGE_FQN" ]]; then - if [[ -z "$PODMAN_TEST_IMAGE_ID" ]]; then - # This will probably only trigger the 2nd time through setup - PODMAN_TEST_IMAGE_ID=$2 - fi - found_needed_image=1 - elif [[ "$1" == "$PODMAN_SYSTEMD_IMAGE_FQN" ]]; then - # This is a big image, don't force unnecessary pulls - : - else - # Always remove image that doesn't match by name - echo "# setup(): removing stray image $1" >&3 - run_podman rmi --force "$1" >/dev/null 2>&1 || true - - # Tagged image will have same IID as our test image; don't rmi it. - if [[ $2 != "$PODMAN_TEST_IMAGE_ID" ]]; then - echo "# setup(): removing stray image $2" >&3 - run_podman rmi --force "$2" >/dev/null 2>&1 || true - fi - fi - done - - # Make sure desired image is present - if [[ -z "$found_needed_image" ]]; then - _prefetch $PODMAN_TEST_IMAGE_FQN - fi - # Temporary subdirectory, in which tests can write whatever they like # and trust that it'll be deleted on cleanup. # (BATS v1.3 and above provide $BATS_TEST_TMPDIR, but we still use @@ -255,38 +204,32 @@ function defer-assertion-failures() { # Basic teardown: remove all pods and containers function basic_teardown() { echo "# [teardown]" >&2 - local actions=( - "pod rm -t 0 --all --force --ignore" - "rm -t 0 --all --force --ignore" - "network prune --force" - "volume rm -a -f" - ) - for action in "${actions[@]}"; do - run_podman '?' $action - - # The -f commands should never exit nonzero, but if they do we want - # to know about it. 
-        # FIXME: someday: also test for [[ -n "$output" ]] - can't do this
-        # yet because too many tests don't clean up their containers
-        if [[ $status -ne 0 ]]; then
-            echo "# [teardown] $_LOG_PROMPT podman $action" >&3
-            for line in "${lines[*]}"; do
-                echo "# $line" >&3
-            done
-
-            # Special case for timeout: check for locks (#18514)
-            if [[ $status -eq 124 ]]; then
-                echo "# [teardown] $_LOG_PROMPT podman system locks" >&3
-                run $PODMAN system locks
-                for line in "${lines[*]}"; do
-                    echo "# $line" >&3
-                done
-            fi
-        fi
-    done
+    immediate-assertion-failures
+    # Unlike normal tests, teardown does not exit on the first command failure;
+    # it only uses the return code of the teardown function as a whole.
+    # This must come directly after immediate-assertion-failures to capture its error code.
+    local exit_code=$?
+
+    # Only check for leaks on a successful run (BATS_TEST_COMPLETED is set to 1),
+    # when immediate-assertion-failures didn't fail (exit_code -eq 0),
+    # and when PODMAN_BATS_LEAK_CHECK is set.
+    # These podman commands are slow, so we do not want to run them by default
+    # and only provide this as an opt-in option. (#22909)
+    if [[ "$BATS_TEST_COMPLETED" -eq 1 ]] && [ $exit_code -eq 0 ] && [ -n "$PODMAN_BATS_LEAK_CHECK" ]; then
+        leak_check
+        exit_code=$((exit_code + $?))
+    fi
+    # Some error happened (either in teardown itself or in the actual test),
+    # so do a full cleanup to ensure the following tests start with a clean env.
+    if [ $exit_code -gt 0 ] || [ -z "$BATS_TEST_COMPLETED" ]; then
+        clean_setup
+        exit_code=$((exit_code + $?))
+    fi
 
     command rm -rf $PODMAN_TMPDIR
-    immediate-assertion-failures
+    exit_code=$((exit_code + $?))
+    return $exit_code
 }
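A brief usage sketch, assuming the `make localsystem` entry point mentioned in the system-test documentation passes the environment through to BATS: since the leak check is strictly opt-in, a local run that exercises it could look like this.

    # Fail teardown when a test leaks containers, pods, volumes, networks or images
    PODMAN_BATS_LEAK_CHECK=1 make localsystem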
@@ -320,6 +263,126 @@ function restore_image() {
     run_podman restore $archive
 }
 
+function leak_check() {
+    run_podman volume ls -q
+    assert "$output" == "" "Leaked volumes!!!"
+    local exit_code=$?
+    run_podman network ls -q
+    # the default "podman" network always exists
+    assert "$output" == "podman" "Leaked networks!!!"
+    exit_code=$((exit_code + $?))
+    run_podman pod ps -q
+    assert "$output" == "" "Leaked pods!!!"
+    exit_code=$((exit_code + $?))
+    run_podman ps -a -q
+    assert "$output" == "" "Leaked containers!!!"
+    exit_code=$((exit_code + $?))
+
+    run_podman images --all --format '{{.Repository}}:{{.Tag}} {{.ID}}'
+    for line in "${lines[@]}"; do
+        set $line
+        if [[ "$1" == "$PODMAN_TEST_IMAGE_FQN" ]]; then
+            found_needed_image=1
+        elif [[ "$1" == "$PODMAN_SYSTEMD_IMAGE_FQN" ]]; then
+            # This is a big image, don't force unnecessary pulls
+            :
+        else
+            exit_code=$((exit_code + 1))
+            echo "Leaked image $1 $2"
+        fi
+    done
+
+    # Make sure desired image is present
+    if [[ -z "$found_needed_image" ]]; then
+        exit_code=$((exit_code + 1))
+        die "$PODMAN_TEST_IMAGE_FQN was removed"
+    fi
+    return $exit_code
+}
+
+function clean_setup() {
+    local actions=(
+        "pod rm -t 0 --all --force --ignore"
+        "rm -t 0 --all --force --ignore"
+        "network prune --force"
+        "volume rm -a -f"
+    )
+    for action in "${actions[@]}"; do
+        run_podman '?' $action
+
+        # The -f commands should never exit nonzero, but if they do we want
+        # to know about it.
+        # FIXME: someday: also test for [[ -n "$output" ]] - can't do this
+        # yet because too many tests don't clean up their containers
+        if [[ $status -ne 0 ]]; then
+            echo "# [teardown] $_LOG_PROMPT podman $action" >&3
+            for line in "${lines[*]}"; do
+                echo "# $line" >&3
+            done
+
+            # Special case for timeout: check for locks (#18514)
+            if [[ $status -eq 124 ]]; then
+                echo "# [teardown] $_LOG_PROMPT podman system locks" >&3
+                run $PODMAN system locks
+                for line in "${lines[*]}"; do
+                    echo "# $line" >&3
+                done
+            fi
+        fi
+    done
+
+    # ...including external (buildah) ones
+    run_podman ps --all --external --format '{{.ID}} {{.Names}}'
+    for line in "${lines[@]}"; do
+        set $line
+        echo "# setup(): removing stray external container $1 ($2)" >&3
+        run_podman '?' rm -f $1
+        if [[ $status -ne 0 ]]; then
+            echo "# [setup] $_LOG_PROMPT podman rm -f $1" >&3
+            for errline in "${lines[@]}"; do
+                echo "# $errline" >&3
+            done
+        fi
+    done
+
+    # Clean up all images except those desired.
+    # 2023-06-26 REMINDER: it is tempting to think that this is clunky,
+    # wouldn't it be safer/cleaner to just 'rmi -a' then '_prefetch $IMAGE'?
+    # Yes, but it's also tremendously slower: 29m for a CI run, to 39m.
+    # Image loads are slow.
+    found_needed_image=
+    run_podman '?' images --all --format '{{.Repository}}:{{.Tag}} {{.ID}}'
+
+    for line in "${lines[@]}"; do
+        set $line
+        if [[ "$1" == "$PODMAN_TEST_IMAGE_FQN" ]]; then
+            if [[ -z "$PODMAN_TEST_IMAGE_ID" ]]; then
+                # This will probably only trigger the 2nd time through setup
+                PODMAN_TEST_IMAGE_ID=$2
+            fi
+            found_needed_image=1
+        elif [[ "$1" == "$PODMAN_SYSTEMD_IMAGE_FQN" ]]; then
+            # This is a big image, don't force unnecessary pulls
+            :
+        else
+            # Always remove image that doesn't match by name
+            echo "# setup(): removing stray image $1" >&3
+            run_podman rmi --force "$1" >/dev/null 2>&1 || true
+
+            # Tagged image will have same IID as our test image; don't rmi it.
+            if [[ $2 != "$PODMAN_TEST_IMAGE_ID" ]]; then
+                echo "# setup(): removing stray image $2" >&3
+                run_podman rmi --force "$2" >/dev/null 2>&1 || true
+            fi
+        fi
+    done
+
+    # Make sure desired image is present
+    if [[ -z "$found_needed_image" ]]; then
+        _prefetch $PODMAN_TEST_IMAGE_FQN
+    fi
+}
+
 # END setup/teardown tools
 ###############################################################################
 # BEGIN podman helpers
 
@@ -449,6 +512,14 @@ function run_podman() {
     fi
 }
 
+function run_podman_testing() {
+    printf "\n%s %s %s %s\n" "$(timestamp)" "$_LOG_PROMPT" "$PODMAN_TESTING" "$*"
+    run $PODMAN_TESTING "$@"
+    if [[ $status -ne 0 ]]; then
+        echo "$output"
+        die "Unexpected error from testing helper, which should always succeed"
+    fi
+}
 
 # Wait for certain output from a container, indicating that it's ready.
function wait_for_output { @@ -1178,5 +1249,9 @@ function wait_for_command_output() { die "Timed out waiting for '$cmd' to return '$want'" } +function make_random_file() { + dd if=/dev/urandom of="$1" bs=1 count=${2:-$((${RANDOM} % 8192 + 1024))} status=none +} + # END miscellaneous tools ############################################################################### diff --git a/test/system/helpers.network.bash b/test/system/helpers.network.bash index c8366ee419..955da500cd 100644 --- a/test/system/helpers.network.bash +++ b/test/system/helpers.network.bash @@ -1,5 +1,7 @@ # -*- bash -*- +_cached_has_pasta= +_cached_has_slirp4netns= ### Feature Checks ############################################################# @@ -31,9 +33,28 @@ function skip_if_no_ipv6() { fi } +# has_slirp4netns - Check if the slirp4netns(1) command is available +function has_slirp4netns() { + if [[ -z "$_cached_has_slirp4netns" ]]; then + _cached_has_slirp4netns=n + run_podman info --format '{{.Host.Slirp4NetNS.Executable}}' + if [[ -n "$output" ]]; then + _cached_has_slirp4netns=y + fi + fi + test "$_cached_has_slirp4netns" = "y" +} + # has_pasta() - Check if the pasta(1) command is available function has_pasta() { - command -v pasta >/dev/null + if [[ -z "$_cached_has_pasta" ]]; then + _cached_has_pasta=n + run_podman info --format '{{.Host.Pasta.Executable}}' + if [[ -n "$output" ]]; then + _cached_has_pasta=y + fi + fi + test "$_cached_has_pasta" = "y" } # skip_if_no_pasta() - Skip current test if pasta(1) is not available @@ -208,15 +229,31 @@ EOF # ipv4_get_route_default() - Print first default IPv4 route reported by netlink # $1: Optional output of 'ip -j -4 route show' from a different context function ipv4_get_route_default() { - local jq_expr='[.[] | select(.dst == "default").gateway] | .[0]' - echo "${1:-$(ip -j -4 route show)}" | jq -rM "${jq_expr}" + local jq_gw='[.[] | select(.dst == "default").gateway] | .[0]' + local jq_nh='[.[] | select(.dst == "default").nexthops[0].gateway] | .[0]' + local out + + out="$(echo "${1:-$(ip -j -4 route show)}" | jq -rM "${jq_gw}")" + if [ "${out}" = "null" ]; then + out="$(echo "${1:-$(ip -j -4 route show)}" | jq -rM "${jq_nh}")" + fi + + echo "${out}" } # ipv6_get_route_default() - Print first default IPv6 route reported by netlink # $1: Optional output of 'ip -j -6 route show' from a different context function ipv6_get_route_default() { - local jq_expr='[.[] | select(.dst == "default").gateway] | .[0]' - echo "${1:-$(ip -j -6 route show)}" | jq -rM "${jq_expr}" + local jq_gw='[.[] | select(.dst == "default").gateway] | .[0]' + local jq_nh='[.[] | select(.dst == "default").nexthops[0].gateway] | .[0]' + local out + + out="$(echo "${1:-$(ip -j -6 route show)}" | jq -rM "${jq_gw}")" + if [ "${out}" = "null" ]; then + out="$(echo "${1:-$(ip -j -6 route show)}" | jq -rM "${jq_nh}")" + fi + + echo "${out}" } # ether_get_mtu() - Get MTU of first Ethernet-like link @@ -369,3 +406,27 @@ function tcp_port_probe() { : | nc "${address}" "${1}" } + +### Pasta Helpers ############################################################## + +function default_ifname() { + local jq_expr='[.[] | select(.dst == "default").dev] | .[0]' + local jq_expr_nh='[.[] | select(.dst == "default").nexthops[0].dev] | .[0]' + local ip_ver="${1}" + local out + + out="$(ip -j -"${ip_ver}" route show | jq -rM "${jq_expr}")" + if [ "${out}" = "null" ]; then + out="$(ip -j -"${ip_ver}" route show | jq -rM "${jq_expr_nh}")" + fi + + echo "${out}" +} + +function default_addr() { + local ip_ver="${1}" + local 
ifname="${2:-$(default_ifname "${ip_ver}")}" + + local expr='[.[0].addr_info[] | select(.deprecated != true)][0].local' + ip -j -"${ip_ver}" addr show "${ifname}" | jq -rM "${expr}" +} diff --git a/test/system/helpers.registry.bash b/test/system/helpers.registry.bash index b6986e64cd..605f4bd34f 100644 --- a/test/system/helpers.registry.bash +++ b/test/system/helpers.registry.bash @@ -112,6 +112,13 @@ function stop_registry() { # Make sure socket is closed if tcp_port_probe $PODMAN_LOGIN_REGISTRY_PORT; then + # for debugging flakes + echo "" + echo "ps auxww --forest" + ps auxww --forest + echo "" + echo "lsof -i -P" + lsof -i -P die "Socket still seems open" fi } diff --git a/test/system/setup_suite.bash b/test/system/setup_suite.bash index ab3791cb2b..4dd59981db 100644 --- a/test/system/setup_suite.bash +++ b/test/system/setup_suite.bash @@ -17,9 +17,8 @@ function setup_suite() { IFS=" " - # Can't use $BATS_SUITE_TMPDIR because podman barfs: - # Error: the specified runroot is longer than 50 characters - export PODMAN_LOGIN_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} podman-bats-registry.XXXXXX) + export PODMAN_LOGIN_WORKDIR="$BATS_SUITE_TMPDIR/podman-bats-registry" + mkdir "$PODMAN_LOGIN_WORKDIR" export PODMAN_LOGIN_USER="user$(random_string 4)" export PODMAN_LOGIN_PASS="pw$(random_string 15)" @@ -31,9 +30,21 @@ function setup_suite() { # The above does not handle errors. Do a final confirmation. assert "$PODMAN_LOGIN_REGISTRY_PORT" != "" \ "Unable to set PODMAN_LOGIN_REGISTRY_PORT" + + clean_setup } # Run at the very end of all tests. Useful for cleanup of non-BATS tmpdirs. function teardown_suite() { stop_registry + local exit_code=$? + + # After all tests make sure there are no leaks and cleanup if there are + leak_check + if [ $? -gt 0 ]; then + exit_code=$((exit_code + 1)) + clean_setup + fi + + return $exit_code } diff --git a/test/test_podman_baseline.sh b/test/test_podman_baseline.sh deleted file mode 100755 index 5ef2d1bda0..0000000000 --- a/test/test_podman_baseline.sh +++ /dev/null @@ -1,613 +0,0 @@ -#!/usr/bin/env bash -# test_podman_baseline.sh -# A script to be run at the command line with Podman installed. -# This should be run against a new kit to provide base level testing -# on a freshly installed machine with no images or container in -# play. This currently needs to be run as root. -# -# Please leave the whale-says test as the last test in this script. -# It makes it easier to identify if the script has finished or not. -# -# To run this command: -# -# /bin/bash -v test_podman_baseline.sh -d # Install and then deinstall Docker -# /bin/bash -v test_podman_baseline.sh -n # Do not perform docker test -# /bin/bash -v test_podman_baseline.sh -e # Stop on error -# /bin/bash -v test_podman_baseline.sh # Continue on error -# - -####### -# See if we want to stop on errors and/or install and then remove Docker. -####### -HOST_PORT="${HOST_PORT:-8080}" -showerror=0 -installdocker=0 -usedocker=1 -while getopts "den" opt; do - case "$opt" in - d) installdocker=1 - ;; - e) showerror=1 - ;; - n) usedocker=0 - ;; - esac -done - -if [ "$installdocker" -eq 1 ] && [ "usedocker" -ne 0 ] -then - echo "Script will install and then deinstall Docker." -fi - -if [ "$showerror" -eq 1 ] -then - echo "Script will stop on unexpected errors." 
- set -e -fi - -pkg_manager=`command -v dnf` -if [ -z "$pkg_manager" ]; then - pkg_manager=`command -v yum` -fi - -echo "Package manager binary: $pkg_manager" - -######## -# Next two commands should return blanks -######## -podman images -podman ps --all - -######## -# Run ls in redis container, this should work -######## -ctrid=$(podman pull docker.io/library/redis:4-alpine3.8) -podman run $ctrid ls / - -######## -# Remove images and containers -######## -podman rm --all -podman rmi --all - -######## -# Create Fedora based image -######## -image=$(podman pull registry.fedoraproject.org/fedora:latest) -echo $image - -######## -# Run container and display contents in /etc -######## -podman run --rm $image ls -alF /etc - -######## -# Test networking, bind mounting a file, stdin/stdout redirect -######## -echo "Testing networking: ..." -port_test_failed=0 -txt1="Hello, Podman" -echo "$txt1" > /tmp/hello.txt -podman run -d --name myweb -p "$HOST_PORT:80" -w /var/www -v /tmp/hello.txt:/var/www/index.txt busybox httpd -f -p 80 -echo "$txt1" | podman exec -i myweb sh -c "cat > /var/www/index2.txt" -txt2=$( podman exec myweb cat /var/www/index2.txt ) -[ "x$txt1" == "x$txt2" ] && echo "PASS1" || { echo "FAIL1"; port_test_failed=1; } -txt2=$( podman run --rm --net host busybox wget -qO - http://localhost:$HOST_PORT/index.txt ) -[ "x$txt1" == "x$txt2" ] && echo "PASS2" || { echo "FAIL2"; port_test_failed=1; } -txt2=$( podman run --rm --net host busybox wget -qO - http://localhost:$HOST_PORT/index2.txt ) -[ "x$txt1" == "x$txt2" ] && echo "PASS3" || { echo "FAIL3"; port_test_failed=1; } -# podman run --rm --net container:myweb --add-host myweb:127.0.0.1 busybox wget -qO - http://myweb/index.txt -rm /tmp/hello.txt -podman stop myweb -podman rm myweb -[ "0$port_test_failed" -eq 1 ] && [ "0$showerror" -eq 1 ] && { - echo "networking test failed"; - exit -1; -} - - -######## -# pull and run many containers in parallel, test locks ..etc. -######## -prun_test_failed=0 -podman rmi docker.io/library/busybox:latest > /dev/null || : -for i in `seq 10` -do ( podman run -d --name b$i docker.io/library/busybox:latest busybox httpd -f -p 80 )& -done -echo -e "\nwaiting for creation...\n" -wait -echo -e "\ndone\n" -# assert we have 10 running containers -count=$( podman ps -q | wc -l ) -[ "x$count" == "x10" ] && echo "PASS" || { echo "FAIL, expecting 10 found $count"; prun_test_failed=1; } -[ "0$prun_test_failed" -eq 1 ] && [ "0$showerror" -eq 1 ] && { - echo "was expecting 10 running containers"; - exit -1; -} - -prun_test_failed=0 -for i in `seq 10`; do ( podman stop -t=1 b$i; podman rm b$i )& done -echo -e "\nwaiting for deletion...\n" -wait -echo -e "\ndone\n" -# assert we have 0 running containers -count=$( podman ps -q | wc -l ) -[ "x$count" == "x0" ] && echo "PASS" || { echo "FAIL, expecting 0 found $count"; prun_test_failed=1; } -[ "0$prun_test_failed" -eq 1 ] && [ "0$showerror" -eq 1 ] && { - echo "was expecting 0 running containers"; - exit -1; -} - - - -######## -# run many containers in parallel for an existing image, test locks ..etc. 
-######## -prun_test_failed=0 -podman pull docker.io/library/busybox:latest > /dev/null || : -for i in `seq 10` -do ( podman run -d --name c$i docker.io/library/busybox:latest busybox httpd -f -p 80 )& -done -echo -e "\nwaiting for creation...\n" -wait -echo -e "\ndone\n" -# assert we have 10 running containers -count=$( podman ps -q | wc -l ) -[ "x$count" == "x10" ] && echo "PASS" || { echo "FAIL, expecting 10 found $count"; prun_test_failed=1; } -[ "0$prun_test_failed" -eq 1 ] && [ "0$showerror" -eq 1 ] && { - echo "was expecting 10 running containers"; - exit -1; -} - - -for i in `seq 10`; do ( podman stop -t=1 c$i; podman rm c$i )& done -echo -e "\nwaiting for deletion...\n" -wait -echo -e "\ndone\n" -# assert we have 0 running containers -count=$( podman ps -q | wc -l ) -[ "x$count" == "x0" ] && echo "PASS" || { echo "FAIL, expecting 0 found $count"; prun_test_failed=1; } -[ "0$prun_test_failed" -eq 1 ] && [ "0$showerror" -eq 1 ] && { - echo "was expecting 0 running containers"; - exit -1; -} - - -######## -# Run Java in the container - should ERROR but never stop -######## -podman run $image java 2>&1 || echo $? - -######## -# Clean out containers -######## -podman rm --all - -######## -# Install java onto the container, commit it, then run it showing java usage -######## -podman run --net=host $image dnf -y install java -javaimage=$(podman ps --all -q) -podman commit $javaimage javaimage -podman run javaimage java -version - -######## -# Cleanup containers and images -######## -podman rm --all -podman rmi --all - -######## -# Check images and containers, should be blanks -######## -podman ps --all -podman images - -######## -# Create Fedora based container -######## -image=$(podman pull registry.fedoraproject.org/fedora:latest) -echo $image -podman run $image ls / - -######## -# Create shell script to test on -######## -FILE=./runecho.sh -/bin/cat <$FILE -#!/usr/bin/env bash -for i in {1..9}; -do - echo "This is a new container pull ipbabble [" \$i "]" -done -EOM -chmod +x $FILE - -######## -# Copy and run file on container -######## -ctrid=$(podman ps --all -q) -mnt=$(podman mount $ctrid) -cp ./runecho.sh ${mnt}/tmp/runecho.sh -podman umount $ctrid -podman commit $ctrid runecho -podman run runecho ./tmp/runecho.sh - -######## -# Inspect the container, verifying above was put into it -######## -podman inspect $ctrid - -######## -# Check the images there should be a runecho image -######## -podman images - -######## -# Remove the containers -######## -podman rm -a - -if [ "$usedocker" -ne 0 ]; then - if [ "$installdocker" -eq 1 ] - then - ######## - # Install Docker, but not for long! - ######## - $package_manager -y install docker - fi - systemctl restart docker - - ######## - # Push fedora-bashecho to the Docker daemon - ######## - podman push runecho docker-daemon:fedora-bashecho:latest - - ######## - # Run fedora-bashecho pull Docker - ######## - docker run fedora-bashecho ./tmp/runecho.sh - - if [ "$installdocker" -eq 1 ] - then - ######## - # Time to remove Docker - ######## - $package_manager -y remove docker - fi -fi - -######## -# Clean up Podman -######## -podman rm --all -podman rmi --all - -######## -# Set up xfs mount for overlay quota -######## - -# 1.004608 MB is 1,004,608 bytes. 
The container overhead is 4608 bytes (or 9 512 byte pages), so this allocates 1 MB of usable storage -PODMANBASE="--storage-driver overlay --storage-opt overlay.size=1.004608M --root /tmp/podman_test/crio" -TMPDIR=/tmp/podman_test -mkdir $TMPDIR -dd if=/dev/zero of=$TMPDIR/virtfs bs=1024 count=30720 -device=$(losetup -f | tr -d '[:space:]') -losetup $device $TMPDIR/virtfs -mkfs.xfs $device -mount -t xfs -o prjquota $device $TMPDIR - -######## -# Expected to succeed -######## -podman $PODMANBASE run --security-opt label=disable docker.io/library/alpine:latest sh -c 'touch file.txt && dd if=/dev/zero of=file.txt count=1048576 bs=1' -rc=$? -if [ $rc == 0 ]; -then - echo "Overlay test within limits passed" -else - echo "Overlay test within limits failed" -fi - -before=`xfs_quota -x -c 'report -N -p' $TMPDIR | grep -c ^#` -podman $PODMANBASE volume create -o o=noquota test-no-quota -after=`xfs_quota -x -c 'report -N -p' $TMPDIR | grep -c ^#` - -if [ $before != $after ]; -then - echo "Test -o=noquota doesn't create a projid failed" -else - echo "Test -o=noquota doesn't create a projid passed" -fi - -before=`xfs_quota -x -c 'report -N -p' $TMPDIR | grep -c ^#` -podman $PODMANBASE volume create -o test-no-quota -after=`xfs_quota -x -c 'report -N -p' $TMPDIR | grep -c ^#` - -if [ $before == $after ]; -then - echo "Test without -o=noquota creates a projid failed" -else - echo "Test without -o=noquota creates a projid passed" -fi - -######## -# Expected to fail -######## - -if [ "$showerror" -ne 1 ]; then - podman $PODMANBASE run --security-opt label=disable docker.io/library/alpine:latest sh -c 'touch file.txt && dd if=/dev/zero of=file.txt count=1048577 bs=1' - rc=$? - if [ $rc != 0 ]; - then - echo "Overlay test outside limits passed" - else - echo "Overlay test outside limits failed" - fi -fi - -######## -# Clean up Podman -######## -podman rm --all -podman rmi --all -umount $TMPDIR -l -losetup -d $device -rm -rf /tmp/podman_test - -######## -# Prep for UserNamespace testing -# Thanks @marcov! -######## -PODMAN_OPTS_VOLUMES="-v /tmp/voltest/vol-0:/mnt/vol-0 -v /tmp/voltest/vol-1000:/mnt/vol-1000 -v /tmp/voltest/vol-100000:/mnt/vol-100000 -v /tmp/voltest/vol-101000:/mnt/vol-101000" -PODMAN_OPTS="$PODMAN_OPTS_VOLUMES --rm" -PODMAN_ID_MAPS="--uidmap=0:100000:1000000 --gidmap=0:100000:1000000" - -######## -# Make directories for UserNamespace testing -######## -mkdir -p /tmp/voltest/vol-0 -mkdir -p /tmp/voltest/vol-1000 -mkdir -p /tmp/voltest/vol-100000 -mkdir -p /tmp/voltest/vol-101000 -UIDGID=`/usr/bin/tr -cd "[:digit:]" <<< /tmp/voltest/vol-0` - -chown $UIDGID:$UIDGID /tmp/voltest/vol-0 -chown $UIDGID:$UIDGID /tmp/voltest/vol-1000 -chown $UIDGID:$UIDGID /tmp/voltest/vol-100000 -chown $UIDGID:$UIDGID /tmp/voltest/vol-101000 - -######## -# Make run test script -######## -FILE=./runtest.sh -/bin/cat <$FILE -#!/usr/bin/env bash -ls -n /mnt -for i in $(find /mnt -mindepth 1 -type d); do - touch "$i/foobar" 2>/dev/null; - echo "create $i/foobar: $?"; - /bin/rm "$i/foobar" 2>/dev/null; -done; -exit 0 -EOM -chmod +x $FILE - -######## -# Make Dockerfile -######## -FILE=./Dockerfile -/bin/cat <$FILE -FROM docker.io/library/debian:latest -ADD ./runtest.sh /runtest.sh -EOM -chmod +x $FILE - -######## -# Build container -######## -podman build -t usernamespace -f ./Dockerfile . 
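The expected outcome of the runs below follows from the ID-mapping arithmetic: with --uidmap=0:100000:1000000, container UID 0 writes files as host UID 100000 and container UID 1000 writes as host UID 101000, so each mapped run should only be able to create files in the volume owned by the matching host UID. A minimal sketch (not part of the original script, reusing the variables defined above):

    podman run --rm $PODMAN_ID_MAPS $PODMAN_OPTS_VOLUMES docker.io/library/debian:latest \
        sh -c 'touch /mnt/vol-100000/probe'
    stat -c '%u' /tmp/voltest/vol-100000/probe   # prints 100000 on the host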
- -######## -# Run the tests for UserNamespaces -######## -echo "Run as root with no user NS" -podman run $PODMAN_OPTS usernamespace /bin/bash runtest.sh -echo "" - -echo "Run as user 1000 with no user NS" -podman run --user=1000 $PODMAN_OPTS usernamespace /bin/bash /runtest.sh -echo "" - -echo "Run as root with user NS " -podman run $PODMAN_ID_MAPS $PODMAN_OPTS usernamespace /bin/bash /runtest.sh -echo "" - -echo "Run as user 1000 with user NS " -podman run --user=1000 $PODMAN_ID_MAPS $PODMAN_OPTS usernamespace /bin/bash /runtest.sh -echo "" - -######## -# Clean up Podman -######## -podman rm --all -podman rmi --all -rm -f ./runtest.sh -rm -rf /tmp/voltest -rm -f ./Dockerfile - -######## -# Build Dockerfiles for OnBuild Test -# (Thanks @clcollins!) -######## -FILE=./Dockerfile -/bin/cat <$FILE -FROM docker.io/library/alpine:latest -RUN touch /foo -ONBUILD RUN touch /bar -EOM -chmod +x $FILE - -FILE=./Dockerfile-2 -/bin/cat <$FILE -FROM onbuild-image -RUN touch /baz -EOM -chmod +x $FILE - -######## -# Build with Dockerfiles -######## -podman build -f ./Dockerfile --format=docker -t onbuild-image . -podman build -f ./Dockerfile-2 --format=docker -t result-image . - -######## -# Check for /bar /baz and /foo files -######## -podman run --network=host result-image ls -alF /bar /baz /foo - -######## -# Clean up Podman -######## -podman rm --all -podman rmi --all -rm ./Dockerfile* - -######## -# Run AppArmor rootless tests -######## -if aa-enabled >/dev/null && getent passwd 1000 >/dev/null; then - # Expected to succeed - sudo -u "#1000" podman run docker.io/library/alpine:latest echo hello - rc=$? - echo -n "rootless with no AppArmor profile " - if [ $rc == 0 ]; then - echo "passed" - else - echo "failed" - fi - - # Expected to succeed - sudo -u "#1000" podman run --security-opt apparmor=unconfined docker.io/library/alpine:latest echo hello - rc=$? - echo -n "rootless with unconfined AppArmor profile " - if [ $rc == 0 ]; then - echo "passed" - else - echo "failed" - fi - - aaFile="/tmp/aaProfile" - aaProfile="aa-demo-profile" - cat > $aaFile << EOF -#include -profile aa-demo-profile flags=(attach_disconnected,mediate_deleted) { - #include - deny mount, - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, -} -EOF - - apparmor_parser -Kr $aaFile - - #Expected to pass (as root) - podman run --security-opt apparmor=$aaProfile docker.io/library/alpine:latest echo hello - rc=$? - echo -n "root with specified AppArmor profile: " - if [ $rc == 0 ]; then - echo "passed" - else - echo "failed" - fi - - #Expected to pass (as root with --privileged). - #Note that the profile should not be loaded letting the mount succeed. - podman run --privileged docker.io/library/alpine:latest sh -c "mkdir tmp2; mount --bind tmp tmp2" - rc=$? - echo -n "root with specified AppArmor profile but --privileged: " - if [ $rc == 0 ]; then - echo "passed" - else - echo "failed" - fi - #Expected to fail (as rootless) - sudo -u "#1000" podman run --security-opt apparmor=$aaProfile docker.io/library/alpine:latest echo hello - rc=$? 
- echo -n "rootless with specified AppArmor profile: " - if [ $rc != 0 ]; then - echo "passed" - else - echo "failed" - fi - - ######## - # Clean up Podman and $aaFile - ######## - apparmor_parser -R $aaFile - podman rm --all - podman rmi --all - sudo -u "#1000" podman rm --all - sudo -u "#1000" podman rmi --all - rm -f $aaFile -fi - -######## -# Build Dockerfile for RUN with priv'd command test -######## -FILE=./Dockerfile -/bin/cat <$FILE -FROM alpine -RUN apk add nginx -EOM -chmod +x $FILE - -######## -# Build with the Dockerfile -######## -podman build -f Dockerfile -t build-priv - -######## -# Cleanup -######## -podman rm -a -f -t 0 -podman rmi -a -f -rm ./Dockerfile - -######## -# Build Dockerfile for WhaleSays test -######## -FILE=./Dockerfile -/bin/cat <$FILE -FROM pharshal/whalesay:latest -RUN apt-get -y update && apt-get install -y fortunes -CMD /usr/games/fortune -a | cowsay -EOM -chmod +x $FILE - -######## -# Build with the Dockerfile -######## -podman build -f Dockerfile -t whale-says - -######## -# Run the container to see what the whale says -######## -podman run whale-says - -######## -# NOTE: Please leave the whale-says as the last test -# in this script. -######## - -######## -# Clean up Podman and /tmp -######## -podman rm --all -podman rmi --all -rm ./Dockerfile* diff --git a/test/test_podman_build.sh b/test/test_podman_build.sh deleted file mode 100644 index 29b7354b1c..0000000000 --- a/test/test_podman_build.sh +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env bash -# -# test_podman_build.sh -# -# Used to test 'podman build' functionality "by hand" -# until we're able to install Buildah in the Travis CI -# test system. -# -# Requires podman and Buildah to be installed on the -# system. This needs to be run from the libpod -# directory after cloning the libpod repo. 
-# -# To run: -# /bin/bash -v test_podman_build.sh -# - -HOME=`pwd` - -echo ######################################################## -echo test "build-from-scratch" -echo ######################################################## - TARGET=scratch-image - podman build -q=True -t $TARGET $HOME/test/build/from-scratch - CID=$(buildah from $TARGET) - buildah rm $CID - podman build -q=False --build-arg HOME=/ --build-arg VERSION=0.1 -t $TARGET $HOME/test/build/from-scratch - CID=$(buildah from $TARGET) - buildah rm $CID - podman build --quiet=True -t $TARGET $HOME/test/build/from-scratch - CID=$(buildah from $TARGET) - buildah rm $CID - podman rmi -f $(podman images -q) - podman images -q - - -echo ######################################################## -echo test "build directory before other options create a tag" -echo ######################################################## -TARGET=tagged-image -podman build $HOME/test/build/from-scratch --quiet=True -t $TARGET -podman images | grep tagged-image - -echo ######################################################## -echo test "build-preserve-subvolumes" -echo ######################################################## - TARGET=volume-image - podman build -t $TARGET $HOME/test/build/preserve-volumes - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - test -s $ROOT/vol/subvol/subsubvol/subsubvolfile - test -s $ROOT/vol/subvol/subvolfile - test -s $ROOT/vol/volfile - test -s $ROOT/vol/Dockerfile - test -s $ROOT/vol/Dockerfile2 - test -s $ROOT/vol/anothervolfile - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - -echo ######################################################## -echo test "build-git-context" -echo ######################################################## - TARGET=giturl-image - # Any repo should do, but this one is small and is FROM: scratch. - GITREPO=git://github.com/projectatomic/nulecule-library - podman build -t $TARGET "$GITREPO" - CID=$(buildah from $TARGET) - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - podman images -q - - -echo ######################################################## -echo test "build-github-context" -echo ######################################################## - TARGET=github-image - # Any repo should do, but this one is small and is FROM: scratch. 
- GITREPO=github.com/projectatomic/nulecule-library - podman build -t $TARGET "$GITREPO" - CID=$(buildah from $TARGET) - buildah rm $CID - buildah --debug=false images -q - podman rmi $(buildah --debug=false images -q) - podman images -q - - -echo ######################################################## -echo test "build-additional-tags" -echo ######################################################## - TARGET=scratch-image - TARGET2=another-scratch-image - TARGET3=so-many-scratch-images - podman build -t $TARGET -t $TARGET2 -t $TARGET3 -f $HOME/test/build/from-scratch/Dockerfile - buildah --debug=false images - CID=$(buildah from $TARGET) - buildah rm $CID - CID=$(buildah from library/$TARGET2) - buildah rm $CID - CID=$(buildah from $TARGET3:latest) - buildah rm $CID - podman rmi -f $(buildah --debug=false images -q) - podman images -q - - -echo ######################################################## -echo test "build-volume-perms" -echo ######################################################## - TARGET=volume-image - podman build -t $TARGET $HOME/test/build/volume-perms - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - test -s $ROOT/vol/subvol/subvolfile - stat -c %f $ROOT/vol/subvol - #Output s/b 41ed - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - podman images -q - - -echo ######################################################## -echo test "build-from-glob" -echo ######################################################## - TARGET=alpine-image - podman build -t $TARGET -file Dockerfile2.glob $HOME/test/build/from-multiple-files - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - cmp $ROOT/Dockerfile1.alpine $HOME/test/build/from-multiple-files/Dockerfile1.alpine - cmp $ROOT/Dockerfile2.withfrom $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - podman images -q - - -echo ######################################################## -echo test "build-from-multiple-files-one-from" -echo ######################################################## - TARGET=scratch-image - podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.nofrom - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - cmp $ROOT/Dockerfile1 $HOME/test/build/from-multiple-files/Dockerfile1.scratch - cmp $ROOT/Dockerfile2.nofrom $HOME/test/build/from-multiple-files/Dockerfile2.nofrom - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - - TARGET=alpine-image - podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.alpine -file $HOME/test/build/from-multiple-files/Dockerfile2.nofrom - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - - -echo ######################################################## -echo test "build-from-multiple-files-two-froms" -echo ######################################################## - TARGET=scratch-image - podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - cmp $ROOT/Dockerfile1 $HOME/test/build/from-multiple-files/Dockerfile1.scratch - cmp $ROOT/Dockerfile2.withfrom $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - test -s $ROOT/etc/passwd - buildah rm $CID - 
podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - - TARGET=alpine-image - podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.alpine -file $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - cmp $ROOT/Dockerfile1 $HOME/test/build/from-multiple-files/Dockerfile1.alpine - cmp $ROOT/Dockerfile2.withfrom $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - test -s $ROOT/etc/passwd - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - -echo ######################################################## -echo test "build-from-multiple-files-two-froms" with "-f -" -echo ######################################################## - TARGET=scratch-image - cat $HOME/test/build/from-multiple-files/Dockerfile1.alpine | podman build -t ${TARGET} -file - -file Dockerfile2.withfrom $HOME/test/build/from-multiple-files - CID=$(buildah from $TARGET) - ROOT=$(buildah mount $CID) - cmp $ROOT/Dockerfile1 $HOME/test/build/from-multiple-files/Dockerfile1.alpine - cmp $ROOT/Dockerfile2.withfrom $HOME/test/build/from-multiple-files/Dockerfile2.withfrom - test -s $ROOT/etc/passwd - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - -echo ######################################################## -echo test "build with preprocessor" -echo ######################################################## - - TARGET=alpine-image - podman build -q -t ${TARGET} -f Decomposed.in $HOME/test/build/preprocess - buildah --debug=false images - CID=$(buildah from $TARGET) - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q - -echo ######################################################## -echo test "build with priv'd RUN" -echo ######################################################## - - TARGET=alpinepriv - podman build -q -t ${TARGET} -f $HOME/test/build/run-privd $HOME/test/build/run-privd - buildah --debug=false images - CID=$(buildah from $TARGET) - buildah rm $CID - podman rmi $(buildah --debug=false images -q) - buildah --debug=false images -q diff --git a/test/test_podman_pods.sh b/test/test_podman_pods.sh deleted file mode 100755 index cd72fce7ce..0000000000 --- a/test/test_podman_pods.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash -# test_podman_pods.sh -# A script to be run at the command line with Podman installed. -# This should be run against a new kit to provide base level testing -# on a freshly installed machine with no images or container in -# play. This currently needs to be run as root. -# -# -# To run this command: -# -# /bin/bash -v test_podman_baseline.sh -e # Stop on error -# /bin/bash -v test_podman_baseline.sh # Continue on error -# - -set -x - -# This scripts needs the jq json parser -if [ -z $(command -v jq2) ]; then - echo "This script requires the jq parser" - exit 1 -fi - -# process input args -stoponerror=0 -while getopts "den" opt; do - case "$opt" in - e) stoponerror=1 - ;; - esac -done - - -if [ "$stoponerror" -eq 1 ] -then - echo "Script will stop on unexpected errors." - set -e - trap "Failed test ..." 
ERR -fi - - -######## -# Create a named and unnamed pod -######## -podman pod create --name foobar -podid=$(podman pod create) - -######## -# Delete a named and unnamed pod -######## -podman pod rm foobar -podman pod rm $podid - -######## -# Create a named pod and run a container in it -######## -podman pod create --name foobar -ctrid=$(podman run --pod foobar -dt docker.io/library/alpine:latest top) -podman ps --no-trunc | grep $ctrid - -######## -# Containers in a pod share network namespace -######## -podman run -dt --pod foobar docker.io/library/nginx:latest -podman run -it --rm --pod foobar registry.fedoraproject.org/fedora-minimal:29 curl http://localhost - -######## -# There should be 3 containers running now -######## -let numContainers=$(podman pod ps --format json | jq -r '.[0] .numberOfContainers') -[ $numContainers -eq 3 ] - -######## -# Pause a container in a pod -######## -podman pause $ctrid -[ $(podman ps -a -f status=paused --format json | jq -r '.[0] .id') == $ctrid ] - -######## -# Unpause a container in a pod -######## -podman unpause $ctrid -podman ps -q --no-trunc | grep $ctrid - -######## -# Stop a pod and its containers -######## -podman pod stop foobar -[ $(podman inspect $ctrid | jq -r '.[0] .State .Running') == "false" ] - -######## -# Start a pod and its containers -######## -podman pod start foobar -podman run -it --rm --pod foobar registry.fedoraproject.org/fedora-minimal:29 curl http://localhost - -######## -# Pause a pod and its containers -######## -podman pod pause foobar -[ $(podman pod ps --format json | jq -r '.[0] .status') == "Paused" ] - -######## -# Unpause a pod and its containers -######## -podman pod unpause foobar -podman run -it --rm --pod foobar registry.fedoraproject.org/fedora-minimal:29 curl http://localhost - -######## -# Kill a pod and its containers -######## -podman pod kill foobar -[ $(podman inspect $ctrid | jq -r '.[0] .State .Running') == "false" ] - -######## -# Remove all pods and their containers -######## -podman pod rm -t 0 -fa diff --git a/test/tools/Makefile b/test/tools/Makefile index ff1ff82eb3..810f5e6145 100644 --- a/test/tools/Makefile +++ b/test/tools/Makefile @@ -22,7 +22,8 @@ $(BUILDDIR): \ $(BUILDDIR)/goimports \ $(BUILDDIR)/go-md2man \ $(BUILDDIR)/git-validation \ - $(BUILDDIR)/ginkgo + $(BUILDDIR)/ginkgo \ + $(BUILDDIR)/swagger $(BUILDDIR)/goimports: $(SOURCES) $(GO_BUILD) -o $@ ./vendor/golang.org/x/tools/cmd/goimports @@ -35,3 +36,6 @@ $(BUILDDIR)/git-validation: $(SOURCES) $(BUILDDIR)/ginkgo: $(SOURCES) $(GO_BUILD) -o $@ ./vendor/github.com/onsi/ginkgo/v2/ginkgo + +$(BUILDDIR)/swagger: $(SOURCES) + $(GO_BUILD) -o $@ ./vendor/github.com/go-swagger/go-swagger/cmd/swagger diff --git a/test/tools/go.mod b/test/tools/go.mod index 9efb1c36c2..6757d22175 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -3,22 +3,73 @@ module github.com/containers/podman/test/tools go 1.20 require ( - github.com/cpuguy83/go-md2man/v2 v2.0.3 - github.com/onsi/ginkgo/v2 v2.14.0 + github.com/cpuguy83/go-md2man/v2 v2.0.4 + github.com/go-swagger/go-swagger v0.30.5 + github.com/onsi/ginkgo/v2 v2.19.0 github.com/vbatts/git-validation v1.2.1 - golang.org/x/tools v0.17.0 + golang.org/x/tools v0.22.0 ) require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/fatih/color v1.15.0 // indirect - github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/handlers v1.5.1 // indirect github.com/hashicorp/go-version v1.3.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/jessevdk/go-flags v1.5.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/magefile/mage v1.14.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/sys v0.15.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.16.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/toqueteos/webbrowser v1.2.0 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/mod v0.18.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.15.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/test/tools/go.sum b/test/tools/go.sum index a4a682be2b..b1019aaeae 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -1,58 +1,725 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 
h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod 
h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-swagger/go-swagger v0.30.5 h1:SQ2+xSonWjjoEMOV5tcOnZJVlfyUfCBhGQGArS1b9+U= +github.com/go-swagger/go-swagger v0.30.5/go.mod h1:cWUhSyCNqV7J1wkkxfr5QmbcnCewetCdvEXqgPvbc/Q= +github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= 
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= 
github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo= github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= +github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= github.com/vbatts/git-validation v1.2.1 h1:O26LKWEtBOfnxKT/SAiFCAcQglKwyuZEKSq6AevpWJ4= github.com/vbatts/git-validation v1.2.1/go.mod h1:isqpXnI2IUKUhoYIsHg5tDmtiEXoA7KJRVsAc4+XoYw= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +golang.org/x/sys v0.21.0 
h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/test/tools/tools.go b/test/tools/tools.go index ec18322719..98f6776680 100644 --- a/test/tools/tools.go +++ b/test/tools/tools.go @@ -7,6 +7,7 @@ package tools import ( _ "github.com/cpuguy83/go-md2man/v2" + _ "github.com/go-swagger/go-swagger/cmd/swagger" _ "github.com/onsi/ginkgo/v2/ginkgo" _ "github.com/vbatts/git-validation" _ "golang.org/x/tools/cmd/goimports" diff --git a/test/tools/vendor/github.com/Masterminds/goutils/.travis.yml b/test/tools/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 0000000000..4025e01ec4 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/test/tools/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/test/tools/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 0000000000..d700ec47f2 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial 
release. diff --git a/vendor/github.com/rootless-containers/rootlesskit/LICENSE b/test/tools/vendor/github.com/Masterminds/goutils/LICENSE.txt similarity index 100% rename from vendor/github.com/rootless-containers/rootlesskit/LICENSE rename to test/tools/vendor/github.com/Masterminds/goutils/LICENSE.txt diff --git a/test/tools/vendor/github.com/Masterminds/goutils/README.md b/test/tools/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 0000000000..163ffe72a8 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. 
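For orientation, here is a minimal sketch of how the vendored goutils helpers documented above might be exercised. The import path matches the vendored package in this change, but the example itself is illustrative only and is not part of this diff; the chosen strings and lengths are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	// Initials extracts the first letter of each whitespace-separated word
	// and never returns an error.
	fmt.Println(goutils.Initials("John Doe Foo")) // "JDF"

	// RandomAlphaNumeric returns an error for invalid arguments
	// (for example, a negative count).
	s, err := goutils.RandomAlphaNumeric(16)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(s)

	// Wrap reflows a single line at the given column, inserting '\n'.
	fmt.Println(goutils.Wrap("the quick brown fox jumped over the lazy dog", 10))
}
```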
+ +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/test/tools/vendor/github.com/Masterminds/goutils/appveyor.yml b/test/tools/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 0000000000..657564a847 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/test/tools/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/test/tools/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 0000000000..8dbd924858 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
+ +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, true) +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. 
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/test/tools/vendor/github.com/Masterminds/goutils/randomstringutils.go b/test/tools/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 0000000000..272670231a --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameters: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + return Random(count, 0, 0, true, true) +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using default source of randomness. 
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + random - a source of randomness. 
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/test/tools/vendor/github.com/Masterminds/goutils/stringutils.go b/test/tools/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 0000000000..741bb530e8 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/test/tools/vendor/github.com/Masterminds/goutils/wordutils.go b/test/tools/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 0000000000..034cad8e21 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. 
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. 
+ +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...) 
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/.gitignore b/test/tools/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 0000000000..6b061e6174 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 0000000000..c87d1c4b90 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,30 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - structcheck + - govet + - staticcheck + - deadcode + - errcheck + - varcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/test/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 0000000000..f12626423a --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,214 @@ +# Changelog + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. 
This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. 
+ +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/test/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 0000000000..9ff7da9c48 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile b/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 0000000000..eac19178fb --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. 
The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/README.md b/test/tools/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 0000000000..d8f54dcbd3 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +There are three major versions fo the `semver` package. + +* 3.x.x is the new stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the most widely used version with numerous tagged releases. This is the + previous stable and is still maintained for bug fixes. The development, to fix + bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. 
For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. 
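As a rough illustration of the OR/AND constraint string described in the paragraph above, a small sketch using the vendored semver package; the versions checked here are arbitrary and chosen only to show one match per branch of the expression:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// AND terms are space- or comma-separated; OR groups are separated by ||.
	c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
	if err != nil {
		panic(err)
	}

	for _, raw := range []string{"1.4.0", "3.1.0", "4.2.3"} {
		v, err := semver.NewVersion(raw)
		if err != nil {
			panic(err)
		}
		// 1.4.0 and 4.2.3 satisfy the constraint; 3.1.0 satisfies neither branch.
		fmt.Printf("%s satisfies constraint: %t\n", raw, c.Check(v))
	}
}
```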
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. 
For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/collection.go b/test/tools/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 0000000000..a78235895f --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go b/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 0000000000..203072e464 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,594 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. 
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. 
+func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. 
+ eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. 
Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/doc.go b/test/tools/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 0000000000..74f97caa57 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix + +# Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +# Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +# Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other is using Constraints. There are some important +differences to notes between these two methods of comparison. + + 1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 + 2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. + 3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. 
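+
+For example, a minimal sketch of the prerelease behavior described above, where
+the `-0` suffix opts a range into matching prereleases (errors ignored for
+brevity):
+
+	pre := semver.MustParse("1.3.0-beta.1")
+
+	c1, _ := semver.NewConstraint(">=1.2.3")   // skips prereleases
+	c2, _ := semver.NewConstraint(">=1.2.3-0") // matches prereleases
+
+	fmt.Println(c1.Check(pre)) // false
+	fmt.Println(c2.Check(pre)) // true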
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful for comparing API versions, as a
+major change is API breaking.
For example, + + - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + - `^2.3` is equivalent to `>= 2.3, < 3` + - `^2.x` is equivalent to `>= 2.0.0, < 3` + - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + - `^0.2` is equivalent to `>=0.2.0 <0.3.0` + - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + - `^0.0` is equivalent to `>=0.0.0 <0.1.0` + - `^0` is equivalent to `>=0.0.0 <1.0.0` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go b/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go new file mode 100644 index 0000000000..a242ad7058 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + d := string(data) + + // Test NewVersion + _, _ = NewVersion(d) + + // Test StrictNewVersion + _, _ = StrictNewVersion(d) + + // Test NewConstraint + _, _ = NewConstraint(d) + + // The return value should be 0 normally, 1 if the priority in future tests + // should be increased, and -1 if future tests should skip passing in that + // data. We do not have a reason to change priority so 0 is always returned. + // There are example tests that do this. + return 0 +} diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/version.go b/test/tools/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 0000000000..7c4bed3347 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,639 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. 
+ ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). 
+func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. 
+// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. 
+ if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. 
According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/.gitignore b/test/tools/vendor/github.com/Masterminds/sprig/v3/.gitignore similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/.gitignore rename to test/tools/vendor/github.com/Masterminds/sprig/v3/.gitignore diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/test/tools/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 0000000000..2ce45dd4ec --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/test/tools/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to test/tools/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/Makefile b/test/tools/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 0000000000..78d409cde2 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/README.md b/test/tools/vendor/github.com/Masterminds/sprig/v3/README.md new file mode 100644 index 0000000000..3e22c60e1a --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/README.md @@ -0,0 +1,100 @@ +# Sprig: Template functions for Go templates + +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. Sprig is a library that provides more than 100 commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## IMPORTANT NOTES + +Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In +its v0.3.9 release, there was a behavior change that impacts merging template +functions in sprig. It is currently recommended to use v0.3.10 or later of that package. +Using v0.3.9 will cause sprig tests to fail. + +## Package Versions + +There are two active major versions of the `sprig` package. 
+ +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig/v3" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. 
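To make the README's loading and pipeline examples concrete, here is a minimal, self-contained sketch (editorial, not part of the vendored README) that wires Sprig into Go's `text/template` and renders the `upper`/`repeat` pipeline shown above. It assumes only the v3 API vendored in this diff; `TxtFuncMap` is the `text/template` counterpart of the `FuncMap`/`HtmlFuncMap` helpers used with `html/template`.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// The function map must be registered before the template text is parsed.
	tpl := template.Must(
		template.New("demo").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ "hello!" | upper | repeat 5 }}`),
	)

	// Writes: HELLO!HELLO!HELLO!HELLO!HELLO!
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```

When the output is HTML, `sprig.FuncMap()` (which simply returns `HtmlFuncMap()`) together with `html/template` is the safer choice, since `html/template` adds contextual escaping.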
diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/crypto.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 0000000000..13a5cd5593 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer 
bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/date.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/date.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/date.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/date.go diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/defaults.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/defaults.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/defaults.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/defaults.go diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/dict.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 0000000000..ade8896984 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } 
+ return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/doc.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/doc.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/doc.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/doc.go diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/functions.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 0000000000..57fcec1d9e --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. 
+func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/list.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/list.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/list.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/list.go diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/network.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/network.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/network.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/network.go diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/numeric.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 0000000000..f68e4182ee --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + 
return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathmetical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/reflect.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/reflect.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/reflect.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/reflect.go diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/regex.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/regex.go similarity index 100% 
rename from test/tools/vendor/github.com/go-task/slim-sprig/regex.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/regex.go diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/semver.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 0000000000..3fbe08aa63 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/test/tools/vendor/github.com/Masterminds/sprig/v3/strings.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 0000000000..e0ae628c84 --- /dev/null +++ b/test/tools/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/test/tools/vendor/github.com/go-task/slim-sprig/url.go b/test/tools/vendor/github.com/Masterminds/sprig/v3/url.go similarity index 100% rename from test/tools/vendor/github.com/go-task/slim-sprig/url.go rename to test/tools/vendor/github.com/Masterminds/sprig/v3/url.go diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/.gitignore b/test/tools/vendor/github.com/asaskevich/govalidator/.gitignore new file mode 100644 index 0000000000..8d69a9418a --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/.gitignore @@ -0,0 +1,15 @@ +bin/ +.idea/ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/.travis.yml b/test/tools/vendor/github.com/asaskevich/govalidator/.travis.yml new file mode 100644 index 0000000000..bb83c6670d --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -0,0 +1,12 @@ +language: go +dist: xenial +go: + - '1.10' + - '1.11' + - '1.12' + - '1.13' + - 'tip' + +script: + - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s + - bash <(curl -s https://codecov.io/bash) diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..4b462b0d81 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +This project adheres to [The Code Manifesto](http://codemanifesto.com) +as its guidelines for contributor interactions. + +## The Code Manifesto + +We want to work in an ecosystem that empowers developers to reach their +potential — one that encourages growth and effective collaboration. A space +that is safe for all. + +A space such as this benefits everyone that participates in it. It encourages +new developers to enter our field. It is through discussion and collaboration +that we grow, and through growth that we improve. + +In the effort to create such a place, we hold to these values: + +1. **Discrimination limits us.** This includes discrimination on the basis of + race, gender, sexual orientation, gender identity, age, nationality, + technology and any other arbitrary exclusion of a group of people. +2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort + levels. Remember that, and if brought to your attention, heed it. +3. **We are our biggest assets.** None of us were born masters of our trade. + Each of us has been helped along the way. Return that favor, when and where + you can. +4. **We are resources for the future.** As an extension of #3, share what you + know. Make yourself a resource to help those that come after you. +5. **Respect defines us.** Treat others as you wish to be treated. Make your + discussions, criticisms and debates from a position of respectfulness. Ask + yourself, is it true? Is it necessary? Is it constructive? Anything less is + unacceptable. +6. **Reactions require grace.** Angry responses are valid, but abusive language + and vindictive actions are toxic. When something happens that offends you, + handle it assertively, but be respectful. 
Escalate reasonably, and try to
+   allow the offender an opportunity to explain themselves, and possibly
+   correct the issue.
+7. **Opinions are just that: opinions.** Each and every one of us, due to our
+   background and upbringing, have varying opinions. That is perfectly
+   acceptable. Remember this: if you respect your own opinions, you should
+   respect the opinions of others.
+8. **To err is human.** You might not intend it, but mistakes do happen and
+   contribute to build experience. Tolerate honest mistakes, and don't
+   hesitate to apologize if you make one yourself.
diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/test/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 0000000000..7ed268a1ed
--- /dev/null
+++ b/test/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and functions that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that currently use this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors, and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors!
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/LICENSE b/test/tools/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 0000000000..cacba91024 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2020 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/README.md b/test/tools/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 0000000000..2c3fc35eb6 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,622 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) +[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). + +#### Installation +Make sure that Go is installed on your computer. 
+Type the following command in your terminal: + + go get github.com/asaskevich/govalidator + +or you can get specified release of the package with `gopkg.in`: + + go get gopkg.in/asaskevich/govalidator.v10 + +After it the package is ready to use. + + +#### Import package in your project +Add following line in your `*.go` file: +```go +import "github.com/asaskevich/govalidator" +``` +If you are unhappy to use long `govalidator`, you can do something like this: +```go +import ( + valid "github.com/asaskevich/govalidator" +) +``` + +#### Activate behavior to require all fields have a validation tag by default +`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. + +`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. + +```go +import "github.com/asaskevich/govalidator" + +func init() { + govalidator.SetFieldsRequiredByDefault(true) +} +``` + +Here's some code to explain it: +```go +// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +type exampleStruct struct { + Name string `` + Email string `valid:"email"` +} + +// this, however, will only fail when Email is empty or an invalid email address: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email"` +} + +// lastly, this will only fail when Email is an invalid email address but not when it's empty: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email,optional"` +} +``` + +#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) +##### Custom validator function signature +A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. +```go +import "github.com/asaskevich/govalidator" + +// old signature +func(i interface{}) bool + +// new signature +func(i interface{}, o interface{}) bool +``` + +##### Adding a custom validator +This was changed to prevent data races when accessing custom validators. +```go +import "github.com/asaskevich/govalidator" + +// before +govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { + // ... +} + +// after +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { + // ... 
+}) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func HasLowerCase(str string) bool +func HasUpperCase(str string) bool +func HasWhitespace(str string) bool +func HasWhitespaceOnly(str string) bool +func InRange(value interface{}, left interface{}, right interface{}) bool +func InRangeFloat32(value, left, right float32) bool +func InRangeFloat64(value, left, right float64) bool +func InRangeInt(value, left, right interface{}) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCRC32(str string) bool +func IsCRC32b(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsExistingEmail(email string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHash(str string, algorithm string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO4217(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsIn(str string, params ...string) bool +func IsInRaw(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMD4(str string) bool +func IsMD5(str string) bool +func IsMagnetURI(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNotNull(str string) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRegex(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsRipeMD128(str string) bool +func IsRipeMD160(str string) bool +func IsRsaPub(str string, params ...string) bool +func IsRsaPublicKey(str string, keylen int) bool +func IsSHA1(str string) bool +func IsSHA256(str string) bool +func 
IsSHA384(str string) bool +func IsSHA512(str string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTiger128(str string) bool +func IsTiger160(str string) bool +func IsTiger192(str string) bool +func IsTime(str string, format string) bool +func IsType(v interface{}, params ...string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsULID(str string) bool +func IsUnixTime(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func MaxStringLength(str string, params ...string) bool +func MinStringLength(str string, params ...string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func PrependPathToErrors(err error, path string) error +func Range(str string, params ...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func SetNilPtrAllowedByRequired(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) +func ToFloat(str string) (float64, error) +func ToInt(value interface{}) (res int64, err error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func TruncatingErrorf(str string, args ...interface{}) error +func UnderscoreToCamelCase(s string) string +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type ISO693Entry +type InterfaceParamValidator +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### IsType +```go +println(govalidator.IsType("Bob", "string")) +println(govalidator.IsType(1, "int")) +i := 1 +println(govalidator.IsType(&i, "*int")) +``` + +IsType can be used through the tag `type` which is essential for map validation: +```go +type User struct { + Name string `valid:"type(string)"` + Age int `valid:"type(int)"` + Meta interface{} `valid:"type(string)"` +} +result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) +if err != nil { + println("error: " + 
err.Error()) +} +println(result) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 +} +_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +``` +```go +data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 +} +_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +_ = govalidator.Count(data, fn) // result = 5 +``` +###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) +If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: +```go +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) +``` +For completely custom validators (interface-based), see below. + +Here is a list of available validators for struct fields (validator - used function): +```go +"email": IsEmail, +"url": IsURL, +"dialstring": IsDialString, +"requrl": IsRequestURL, +"requri": IsRequestURI, +"alpha": IsAlpha, +"utfletter": IsUTFLetter, +"alphanum": IsAlphanumeric, +"utfletternum": IsUTFLetterNumeric, +"numeric": IsNumeric, +"utfnumeric": IsUTFNumeric, +"utfdigit": IsUTFDigit, +"hexadecimal": IsHexadecimal, +"hexcolor": IsHexcolor, +"rgbcolor": IsRGBcolor, +"lowercase": IsLowerCase, +"uppercase": IsUpperCase, +"int": IsInt, +"float": IsFloat, +"null": IsNull, +"uuid": IsUUID, +"uuidv3": IsUUIDv3, +"uuidv4": IsUUIDv4, +"uuidv5": IsUUIDv5, +"creditcard": IsCreditCard, +"isbn10": IsISBN10, +"isbn13": IsISBN13, +"json": IsJSON, +"multibyte": IsMultibyte, +"ascii": IsASCII, +"printableascii": IsPrintableASCII, +"fullwidth": IsFullWidth, +"halfwidth": IsHalfWidth, +"variablewidth": IsVariableWidth, +"base64": IsBase64, +"datauri": IsDataURI, +"ip": IsIP, +"port": IsPort, +"ipv4": IsIPv4, +"ipv6": IsIPv6, +"dns": IsDNSName, +"host": IsHost, +"mac": IsMAC, +"latitude": IsLatitude, +"longitude": IsLongitude, +"ssn": IsSSN, +"semver": IsSemver, +"rfc3339": IsRFC3339, +"rfc3339WithoutZone": IsRFC3339WithoutZone, +"ISO3166Alpha2": IsISO3166Alpha2, +"ISO3166Alpha3": IsISO3166Alpha3, +"ulid": IsULID, +``` +Validators with parameters + +```go +"range(min|max)": Range, +"length(min|max)": ByteLength, +"runelength(min|max)": RuneLength, +"stringlength(min|max)": StringLength, +"matches(pattern)": StringMatches, +"in(string1|string2|...|stringN)": IsIn, +"rsapub(keylength)" : IsRsaPub, +"minstringlength(int): MinStringLength, +"maxstringlength(int): MaxStringLength, +``` +Validators with parameters for any type + +```go +"type(type)": IsType, +``` + +And here is small example of usage: +```go +type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + Message2 string `valid:"animal(dog)"` + AuthorIP 
string `valid:"ipv4"` + Date string `valid:"-"` +} +post := &Post{ + Title: "My Example Post", + Message: "duck", + Message2: "dog", + AuthorIP: "123.234.54.3", +} + +// Add your own struct validation tags +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) + +// Add your own struct validation tags with parameter +govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { + species := params[0] + return str == species +}) +govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") + +result, err := govalidator.ValidateStruct(post) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) +If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` + +So here is small example of usage: +```go +var mapTemplate = map[string]interface{}{ + "name":"required,alpha", + "family":"required,alpha", + "email":"required,email", + "cell-phone":"numeric", + "address":map[string]interface{}{ + "line1":"required,alphanum", + "line2":"alphanum", + "postal-code":"numeric", + }, +} + +var inputMap = map[string]interface{}{ + "name":"Bob", + "family":"Smith", + "email":"foo@bar.baz", + "address":map[string]interface{}{ + "line1":"", + "line2":"", + "postal-code":"", + }, +} + +result, err := govalidator.ValidateMap(inputMap, mapTemplate) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` + +###### WhiteList +```go +// Remove all characters from string ignoring characters between "a" and "z" +println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") +``` + +###### Custom validation functions +Custom validation using your own domain specific validators is also available - here's an example of how to use it: +```go +import "github.com/asaskevich/govalidator" + +type CustomByteArray [6]byte // custom types are supported and can be validated + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // you can type switch on the context interface being validated + case StructWithCustomByteArray: + // you can check and validate against some other field in the context, + // return early or not validate against the context at all – your choice + case SomeOtherType: + // ... + default: + // expecting some other type? Throw/panic here or continue + } + + switch v := i.(type) { // type switch on the struct field being validated + case CustomByteArray: + for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes + if e != 0 { + return true + } + } + } + return false +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // this validates a field against the value in another field, i.e. 
dependent validation + case StructWithCustomByteArray: + return len(v.ID) >= v.CustomMinLength + } + return false +}) +``` + +###### Loop over Error() +By default .Error() returns all errors in a single String. To access each error you can do this: +```go + if err != nil { + errs := err.(govalidator.Errors).Errors() + for _, e := range errs { + fmt.Println(e.Error()) + } + } +``` + +###### Custom error messages +Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: +```go +type Ticket struct { + Id int64 `json:"id"` + FirstName string `json:"firstname" valid:"required~First name is blank"` +} +``` + +#### Notes +Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). +Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). + +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. + +#### What to contribute +If you don't know what to do, there are some features and functions that need to be done + +- [ ] Refactor code +- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check +- [ ] Create actual list of contributors and projects that currently using this package +- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) +- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) +- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new +- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc +- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [ ] Implement fuzzing testing +- [ ] Implement some struct/map/array utilities +- [ ] Implement map/array validation +- [ ] Implement benchmarking +- [ ] Implement batch of examples +- [ ] Look at forks for new features and fixes + +#### Advice +Feel free to create what you want, but keep in mind when you implement new features: +- Code must be clear and readable, names of variables/constants clearly describes what they are doing +- Public functions must be documented and described in source file and added to README.md to the list of available functions +- There are must be unit-tests for any new functions and improvements + +## Credits +### Contributors + +This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. + +#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) +* [Daniel Lohse](https://github.com/annismckenzie) +* [Attila Oláh](https://github.com/attilaolah) +* [Daniel Korner](https://github.com/Dadie) +* [Steven Wilkin](https://github.com/stevenwilkin) +* [Deiwin Sarjas](https://github.com/deiwin) +* [Noah Shibley](https://github.com/slugmobile) +* [Nathan Davies](https://github.com/nathj07) +* [Matt Sanford](https://github.com/mzsanford) +* [Simon ccl1115](https://github.com/ccl1115) + + + + +### Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] + + + + + + + + + + + + + + + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/arrays.go b/test/tools/vendor/github.com/asaskevich/govalidator/arrays.go new file mode 100644 index 0000000000..3e1da7cb48 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/arrays.go @@ -0,0 +1,87 @@ +package govalidator + +// Iterator is the function that accepts element of slice/array and its index +type Iterator func(interface{}, int) + +// ResultIterator is the function that accepts element of slice/array and its index and returns any result +type ResultIterator func(interface{}, int) interface{} + +// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean +type ConditionIterator func(interface{}, int) bool + +// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values +type ReduceIterator func(interface{}, interface{}) interface{} + +// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. +func Some(array []interface{}, iterator ConditionIterator) bool { + res := false + for index, data := range array { + res = res || iterator(data, index) + } + return res +} + +// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. +func Every(array []interface{}, iterator ConditionIterator) bool { + res := true + for index, data := range array { + res = res && iterator(data, index) + } + return res +} + +// Reduce boils down a list of values into a single value by ReduceIterator +func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { + for _, data := range array { + initialValue = iterator(initialValue, data) + } + return initialValue +} + +// Each iterates over the slice and apply Iterator to every item +func Each(array []interface{}, iterator Iterator) { + for index, data := range array { + iterator(data, index) + } +} + +// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. +func Map(array []interface{}, iterator ResultIterator) []interface{} { + var result = make([]interface{}, len(array)) + for index, data := range array { + result[index] = iterator(data, index) + } + return result +} + +// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. +func Find(array []interface{}, iterator ConditionIterator) interface{} { + for index, data := range array { + if iterator(data, index) { + return data + } + } + return nil +} + +// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. +func Filter(array []interface{}, iterator ConditionIterator) []interface{} { + var result = make([]interface{}, 0) + for index, data := range array { + if iterator(data, index) { + result = append(result, data) + } + } + return result +} + +// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
+func Count(array []interface{}, iterator ConditionIterator) int { + count := 0 + for index, data := range array { + if iterator(data, index) { + count = count + 1 + } + } + return count +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/converter.go b/test/tools/vendor/github.com/asaskevich/govalidator/converter.go new file mode 100644 index 0000000000..d68e990fc2 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/converter.go @@ -0,0 +1,81 @@ +package govalidator + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// ToString convert the input to a string. +func ToString(obj interface{}) string { + res := fmt.Sprintf("%v", obj) + return res +} + +// ToJSON convert the input to a valid JSON string +func ToJSON(obj interface{}) (string, error) { + res, err := json.Marshal(obj) + if err != nil { + res = []byte("") + } + return string(res), err +} + +// ToFloat convert the input string to a float, or 0.0 if the input is not a float. +func ToFloat(value interface{}) (res float64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = float64(val.Int()) + case uint, uint8, uint16, uint32, uint64: + res = float64(val.Uint()) + case float32, float64: + res = val.Float() + case string: + res, err = strconv.ParseFloat(val.String(), 64) + if err != nil { + res = 0 + } + default: + err = fmt.Errorf("ToInt: unknown interface type %T", value) + res = 0 + } + + return +} + +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. +func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case float32, float64: + res = int64(val.Float()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("ToInt: invalid numeric format %g", value) + res = 0 + } + default: + err = fmt.Errorf("ToInt: unknown interface type %T", value) + res = 0 + } + + return +} + +// ToBoolean convert the input string to a boolean. +func ToBoolean(str string) (bool, error) { + return strconv.ParseBool(str) +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/doc.go b/test/tools/vendor/github.com/asaskevich/govalidator/doc.go new file mode 100644 index 0000000000..55dce62dc8 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/doc.go @@ -0,0 +1,3 @@ +package govalidator + +// A package of validators and sanitizers for strings, structures and collections. diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/error.go b/test/tools/vendor/github.com/asaskevich/govalidator/error.go new file mode 100644 index 0000000000..1da2336f47 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/error.go @@ -0,0 +1,47 @@ +package govalidator + +import ( + "sort" + "strings" +) + +// Errors is an array of multiple errors and conforms to the error interface. +type Errors []error + +// Errors returns itself. +func (es Errors) Errors() []error { + return es +} + +func (es Errors) Error() string { + var errs []string + for _, e := range es { + errs = append(errs, e.Error()) + } + sort.Strings(errs) + return strings.Join(errs, ";") +} + +// Error encapsulates a name, an error and whether there's a custom error message or not. 
+type Error struct { + Name string + Err error + CustomErrorMessageExists bool + + // Validator indicates the name of the validator that failed + Validator string + Path []string +} + +func (e Error) Error() string { + if e.CustomErrorMessageExists { + return e.Err.Error() + } + + errName := e.Name + if len(e.Path) > 0 { + errName = strings.Join(append(e.Path, e.Name), ".") + } + + return errName + ": " + e.Err.Error() +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/numerics.go b/test/tools/vendor/github.com/asaskevich/govalidator/numerics.go new file mode 100644 index 0000000000..5041d9e868 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/numerics.go @@ -0,0 +1,100 @@ +package govalidator + +import ( + "math" +) + +// Abs returns absolute value of number +func Abs(value float64) float64 { + return math.Abs(value) +} + +// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise +func Sign(value float64) float64 { + if value > 0 { + return 1 + } else if value < 0 { + return -1 + } else { + return 0 + } +} + +// IsNegative returns true if value < 0 +func IsNegative(value float64) bool { + return value < 0 +} + +// IsPositive returns true if value > 0 +func IsPositive(value float64) bool { + return value > 0 +} + +// IsNonNegative returns true if value >= 0 +func IsNonNegative(value float64) bool { + return value >= 0 +} + +// IsNonPositive returns true if value <= 0 +func IsNonPositive(value float64) bool { + return value <= 0 +} + +// InRangeInt returns true if value lies between left and right border +func InRangeInt(value, left, right interface{}) bool { + value64, _ := ToInt(value) + left64, _ := ToInt(left) + right64, _ := ToInt(right) + if left64 > right64 { + left64, right64 = right64, left64 + } + return value64 >= left64 && value64 <= right64 +} + +// InRangeFloat32 returns true if value lies between left and right border +func InRangeFloat32(value, left, right float32) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRangeFloat64 returns true if value lies between left and right border +func InRangeFloat64(value, left, right float64) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. +// All types must the same type. 
+// False if value doesn't lie in range or if it incompatible or not comparable +func InRange(value interface{}, left interface{}, right interface{}) bool { + switch value.(type) { + case int: + intValue, _ := ToInt(value) + intLeft, _ := ToInt(left) + intRight, _ := ToInt(right) + return InRangeInt(intValue, intLeft, intRight) + case float32, float64: + intValue, _ := ToFloat(value) + intLeft, _ := ToFloat(left) + intRight, _ := ToFloat(right) + return InRangeFloat64(intValue, intLeft, intRight) + case string: + return value.(string) >= left.(string) && value.(string) <= right.(string) + default: + return false + } +} + +// IsWhole returns true if value is whole number +func IsWhole(value float64) bool { + return math.Remainder(value, 1) == 0 +} + +// IsNatural returns true if value is natural number (positive and whole) +func IsNatural(value float64) bool { + return IsWhole(value) && IsPositive(value) +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/patterns.go b/test/tools/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 0000000000..bafc3765ea --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,113 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = 
"^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" + IMSI string = "^\\d{14,15}$" + E164 string = `^\+?[1-9]\d{1,14}$` +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = 
regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxARWinPath = regexp.MustCompile(WinARPath) + rxARUnixPath = regexp.MustCompile(UnixARPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) + rxIMSI = regexp.MustCompile(IMSI) + rxE164 = regexp.MustCompile(E164) +) diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/types.go b/test/tools/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 0000000000..c573abb51a --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,656 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accept additional parameters. +type ParamValidator func(str string, params ...string) bool + +// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value +type InterfaceParamValidator func(in interface{}, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. 
+// It implements the methods to sort by string. +type stringValues []reflect.Value + +// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value +var InterfaceParamTagMap = map[string]InterfaceParamValidator{ + "type": IsType, +} + +// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. +var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ + "type": regexp.MustCompile(`^type\((.*)\)$`), +} + +// ParamTagMap is a map of functions accept variants parameters +var ParamTagMap = map[string]ParamValidator{ + "length": ByteLength, + "range": Range, + "runelength": RuneLength, + "stringlength": StringLength, + "matches": StringMatches, + "in": IsInRaw, + "rsapub": IsRsaPub, + "minstringlength": MinStringLength, + "maxstringlength": MaxStringLength, +} + +// ParamTagRegexMap maps param tags to their respective regexes. +var ParamTagRegexMap = map[string]*regexp.Regexp{ + "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), + "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), + "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), + "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), + "in": regexp.MustCompile(`^in\((.*)\)`), + "matches": regexp.MustCompile(`^matches\((.+)\)$`), + "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), + "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), + "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), +} + +type customTypeTagMap struct { + validators map[string]CustomTypeValidator + + sync.RWMutex +} + +func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { + tm.RLock() + defer tm.RUnlock() + v, ok := tm.validators[name] + return v, ok +} + +func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { + tm.Lock() + defer tm.Unlock() + tm.validators[name] = ctv +} + +// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. +// Use this to validate compound or custom types that need to be handled as a whole, e.g. +// `type UUID [16]byte` (this would be handled as an array of bytes). +var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} + +// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
+var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "notnull": IsNotNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, + "IMEI": IsIMEI, + "ulid": IsULID, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", 
"BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, 
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + 
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", 
"Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", 
"TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", + "VEF", "VES", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/utils.go b/test/tools/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 0000000000..f4c30f824a --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains checks if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches checks if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. 
+func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList removes characters that do not appear in the whitelist. +func WhiteList(str, chars string) string { + pattern := "[^" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// BlackList removes characters that appear in the blacklist. +func BlackList(str, chars string) string { + pattern := "[" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. +// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). +func StripLow(str string, keepNewLines bool) string { + chars := "" + if keepNewLines { + chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" + } else { + chars = "\x00-\x1F\x7F" + } + return BlackList(str, chars) +} + +// ReplacePattern replaces regular expression pattern in string +func ReplacePattern(str, pattern, replace string) string { + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, replace) +} + +// Escape replaces <, >, & and " with HTML entities. +var Escape = html.EscapeString + +func addSegment(inrune, segment []rune) []rune { + if len(segment) == 0 { + return inrune + } + if len(inrune) != 0 { + inrune = append(inrune, '_') + } + inrune = append(inrune, segment...) + return inrune +} + +// UnderscoreToCamelCase converts from underscore separated form to camel case form. +// Ex.: my_func => MyFunc +func UnderscoreToCamelCase(s string) string { + return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) +} + +// CamelCaseToUnderscore converts from camel case form to underscore separated form. +// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse returns reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines splits string by "\n" and return array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine returns specified line of multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags removes all tags from HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName returns safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +} + +// NormalizeEmail canonicalize an email address. 
+// The local part of the email address is lowercased for all domains; the hostname is always lowercased and +// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate a string to the closest length without breaking words. +func Truncate(str string, length int, ending string) string { + var aftstr, befstr string + if len(str) > length { + words := strings.Fields(str) + before, present := 0, 0 + for i := range words { + befstr = aftstr + before = present + aftstr = aftstr + words[i] + " " + present = len(aftstr) + if present > length && i != 0 { + if (length - before) < (present - length) { + return Trim(befstr, " /\\.,\"'#!?&@+-") + ending + } + return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending + } + } + } + + return str +} + +// PadLeft pads left side of a string if size of string is less then indicated pad length +func PadLeft(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, false) +} + +// PadRight pads right side of a string if size of string is less then indicated pad length +func PadRight(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, false, true) +} + +// PadBoth pads both sides of a string if size of string is less then indicated pad length +func PadBoth(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, true) +} + +// PadString either left, right or both sides. +// Note that padding string can be unicode and more then one character +func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { + + // When padded length is less then the current string size + if padLen < utf8.RuneCountInString(str) { + return str + } + + padLen -= utf8.RuneCountInString(str) + + targetLen := padLen + + targetLenLeft := targetLen + targetLenRight := targetLen + if padLeft && padRight { + targetLenLeft = padLen / 2 + targetLenRight = padLen - targetLenLeft + } + + strToRepeatLen := utf8.RuneCountInString(padStr) + + repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) + repeatedString := strings.Repeat(padStr, repeatTimes) + + leftSide := "" + if padLeft { + leftSide = repeatedString[0:targetLenLeft] + } + + rightSide := "" + if padRight { + rightSide = repeatedString[0:targetLenRight] + } + + return leftSide + str + rightSide +} + +// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object +func TruncatingErrorf(str string, args ...interface{}) error { + n := strings.Count(str, "%s") + return fmt.Errorf(str, args[:n]...) 
+} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/validator.go b/test/tools/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 0000000000..c9c4fac065 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1768 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const rfc3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). +// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail checks if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail checks if the string is an email of existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +} + +// IsURL checks if the string is an URL. 
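+// Illustrative behaviour (assumed example values):
+//   IsURL("https://example.com/path") == true
+//   IsURL("invalid")                  == false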
+func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL checks if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL confirm to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false //Couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false //No Scheme found + } + return true +} + +// IsRequestURI checks if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. +func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +//IsUTFLetter checks if the string contains only unicode letter characters. +//Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric checks if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric checks if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { //numbers && minus sign are ok + return false + } + } + return true + +} + +// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. 
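+// Illustrative examples (assumed values): IsUTFDigit("123") and IsUTFDigit("-1")
+// return true, while IsUTFDigit("1.5") returns false.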
+func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { //digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal checks if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor checks if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase checks if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase checks if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt checks if the string is an integer. Empty string is valid. +func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsFloat checks if the string is a float. +func IsFloat(str string) bool { + return str != "" && rxFloat.MatchString(str) +} + +// IsDivisibleBy checks if the string is a number that's divisible by another. +// If second argument is not valid integer or zero, it's return false. +// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). +func IsDivisibleBy(str, num string) bool { + f, _ := ToFloat(str) + p := int64(f) + q, _ := ToInt(num) + if q == 0 { + return false + } + return (p == 0) || (p%q == 0) +} + +// IsNull checks if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} + +// IsNotNull checks if the string is not null. +func IsNotNull(str string) bool { + return !IsNull(str) +} + +// HasWhitespaceOnly checks the string only contains whitespace +func HasWhitespaceOnly(str string) bool { + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) +} + +// HasWhitespace checks if the string contains any whitespace +func HasWhitespace(str string) bool { + return len(str) > 0 && rxHasWhitespace.MatchString(str) +} + +// IsByteLength checks if the string's length (in bytes) falls in a range. +func IsByteLength(str string, min, max int) bool { + return len(str) >= min && len(str) <= max +} + +// IsUUIDv3 checks if the string is a UUID version 3. +func IsUUIDv3(str string) bool { + return rxUUID3.MatchString(str) +} + +// IsUUIDv4 checks if the string is a UUID version 4. +func IsUUIDv4(str string) bool { + return rxUUID4.MatchString(str) +} + +// IsUUIDv5 checks if the string is a UUID version 5. +func IsUUIDv5(str string) bool { + return rxUUID5.MatchString(str) +} + +// IsUUID checks if the string is a UUID (version 3, 4 or 5). 
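+// Illustrative examples (assumed values):
+//   IsUUID("625e63f3-58f5-40b7-83a1-a72ad31acffb") == true
+//   IsUUID("not-a-uuid")                           == false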
+func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. +var ulidDec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const ulidEncodedSize = 26 + +// IsULID checks if the string is a ULID. +// +// Implementation got from: +// https://github.com/oklog/ulid (Apache-2.0 License) +// +func IsULID(str string) bool { + // Check if a base32 encoded ULID is the right length. + if len(str) != ulidEncodedSize { + return false + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if ulidDec[str[0]] == 0xFF || + ulidDec[str[1]] == 0xFF || + ulidDec[str[2]] == 0xFF || + ulidDec[str[3]] == 0xFF || + ulidDec[str[4]] == 0xFF || + ulidDec[str[5]] == 0xFF || + ulidDec[str[6]] == 0xFF || + ulidDec[str[7]] == 0xFF || + ulidDec[str[8]] == 0xFF || + ulidDec[str[9]] == 0xFF || + ulidDec[str[10]] == 0xFF || + ulidDec[str[11]] == 0xFF || + ulidDec[str[12]] == 0xFF || + ulidDec[str[13]] == 0xFF || + ulidDec[str[14]] == 0xFF || + ulidDec[str[15]] == 0xFF || + ulidDec[str[16]] == 0xFF || + ulidDec[str[17]] == 0xFF || + ulidDec[str[18]] == 0xFF || + ulidDec[str[19]] == 0xFF || + ulidDec[str[20]] == 0xFF || + ulidDec[str[21]] == 0xFF || + ulidDec[str[22]] == 0xFF || + ulidDec[str[23]] == 0xFF || + ulidDec[str[24]] == 0xFF || + ulidDec[str[25]] == 0xFF { + return false + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if str[0] > '7' { + return false + } + return true +} + +// IsCreditCard checks if the string is a credit card. 
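+// The input is first stripped of spaces and dashes, matched against the card
+// number pattern, and then verified with the Luhn checksum implemented below.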
+func IsCreditCard(str string) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + + number, _ := ToInt(sanitized) + number, lastDigit := number / 10, number % 10 + + var sum int64 + for i:=0; number > 0; i++ { + digit := number % 10 + + if i % 2 == 0 { + digit *= 2 + if digit > 9 { + digit -= 9 + } + } + + sum += digit + number = number / 10 + } + + return (sum + lastDigit) % 10 == 0 +} + +// IsISBN10 checks if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 checks if the string is an ISBN version 13. +func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN checks if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be checks both variants. +func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII checks if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 checks if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsFilePath checks is a string is Win or Unix file path and returns it's type. 
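+// Illustrative examples (assumed values): a path such as "C:\\Windows\\temp"
+// is reported as Win, while "/usr/local/bin" is reported as Unix.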
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +//IsWinFilePath checks both relative & absolute paths in Windows +func IsWinFilePath(str string) bool { + if rxARWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false + } + return true + } + return false +} + +//IsUnixFilePath checks both relative & absolute paths in Unix +func IsUnixFilePath(str string) bool { + if rxARUnixPath.MatchString(str) { + return true + } + return false +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsMagnetURI checks if a string is valid magnet URI +func IsMagnetURI(str string) bool { + return rxMagnetURI.MatchString(str) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + var len string + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha3-224" { + len = "56" + } else if algo == "sha256" || algo == "sha3-256" { + len = "64" + } else if algo == "sha384" || algo == "sha3-384" { + len = "96" + } else if algo == "sha512" || algo == "sha3-512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsSHA3224 checks is a string is a SHA3-224 hash. 
Alias for `IsHash(str, "sha3-224")` +func IsSHA3224(str string) bool { + return IsHash(str, "sha3-224") +} + +// IsSHA3256 checks is a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")` +func IsSHA3256(str string) bool { + return IsHash(str, "sha3-256") +} + +// IsSHA3384 checks is a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")` +func IsSHA3384(str string) bool { + return IsHash(str, "sha3-384") +} + +// IsSHA3512 checks is a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")` +func IsSHA3512(str string) bool { + return IsHash(str, "sha3-512") +} + +// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` +func IsSHA512(str string) bool { + return IsHash(str, "sha512") +} + +// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` +func IsSHA384(str string) bool { + return IsHash(str, "sha384") +} + +// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` +func IsSHA256(str string) bool { + return IsHash(str, "sha256") +} + +// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` +func IsTiger192(str string) bool { + return IsHash(str, "tiger192") +} + +// IsTiger160 checks is a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")` +func IsTiger160(str string) bool { + return IsHash(str, "tiger160") +} + +// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` +func IsRipeMD160(str string) bool { + return IsHash(str, "ripemd160") +} + +// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` +func IsSHA1(str string) bool { + return IsHash(str, "sha1") +} + +// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` +func IsTiger128(str string) bool { + return IsHash(str, "tiger128") +} + +// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` +func IsRipeMD128(str string) bool { + return IsHash(str, "ripemd128") +} + +// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` +func IsCRC32(str string) bool { + return IsHash(str, "crc32") +} + +// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` +func IsCRC32b(str string) bool { + return IsHash(str, "crc32b") +} + +// IsMD5 checks is a string is a MD5 hash. Alias for `IsHash(str, "md5")` +func IsMD5(str string) bool { + return IsHash(str, "md5") +} + +// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")` +func IsMD4(str string) bool { + return IsHash(str, "md4") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 checks if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// IsIPv6 checks if the string is an IP version 6. 
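+// Illustrative examples (assumed values):
+//   IsIPv6("2001:db8::68") == true
+//   IsIPv6("192.0.2.10")   == false  // parses, but as IPv4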
+func IsIPv6(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ":") +} + +// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) +func IsCIDR(str string) bool { + _, _, err := net.ParseCIDR(str) + return err == nil +} + +// IsMAC checks if a string is valid MAC address. +// Possible MAC formats: +// 01:23:45:67:89:ab +// 01:23:45:67:89:ab:cd:ef +// 01-23-45-67-89-ab +// 01-23-45-67-89-ab-cd-ef +// 0123.4567.89ab +// 0123.4567.89ab.cdef +func IsMAC(str string) bool { + _, err := net.ParseMAC(str) + return err == nil +} + +// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name +func IsHost(str string) bool { + return IsIP(str) || IsDNSName(str) +} + +// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId. +func IsMongoID(str string) bool { + return rxHexadecimal.MatchString(str) && (len(str) == 24) +} + +// IsLatitude checks if a string is valid latitude. +func IsLatitude(str string) bool { + return rxLatitude.MatchString(str) +} + +// IsLongitude checks if a string is valid longitude. +func IsLongitude(str string) bool { + return rxLongitude.MatchString(str) +} + +// IsIMEI checks if a string is valid IMEI +func IsIMEI(str string) bool { + return rxIMEI.MatchString(str) +} + +// IsIMSI checks if a string is valid IMSI +func IsIMSI(str string) bool { + if !rxIMSI.MatchString(str) { + return false + } + + mcc, err := strconv.ParseInt(str[0:3], 10, 32) + if err != nil { + return false + } + + switch mcc { + case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219: + case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235: + case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257: + case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278: + case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293: + case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314: + case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346: + case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364: + case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402: + case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417: + case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428: + case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441: + case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467: + case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528: + case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545: + case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555: + case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611: + case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621: + case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631: + case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641: + case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652: + case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708: + case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736: + case 738, 740, 742, 744, 746, 748, 750, 995: + return true + default: + return false + } + return true +} + +// IsRsaPublicKey checks if a string is valid public key with provided length +func IsRsaPublicKey(str string, keylen int) bool { + bb := bytes.NewBufferString(str) + pemBytes, err := ioutil.ReadAll(bb) + if err != nil { + return false + } + block, _ := pem.Decode(pemBytes) + if block != nil && block.Type != "PUBLIC KEY" { + return false + } + var der []byte + + if block != nil { + der = block.Bytes + } else { + der, err = base64.StdEncoding.DecodeString(str) + if err != nil { + return false + } + } + + key, err := x509.ParsePKIXPublicKey(der) + if err != nil { + return false + } 
+ pubkey, ok := key.(*rsa.PublicKey) + if !ok { + return false + } + bitlen := len(pubkey.N.Bytes()) * 8 + return bitlen == int(keylen) +} + +// IsRegex checks if a give string is a valid regex with RE2 syntax or not +func IsRegex(str string) bool { + if _, err := regexp.Compile(str); err == nil { + return true + } + return false +} + +func toJSONName(tag string) string { + if tag == "" { + return "" + } + + // JSON name always comes first. If there's no options then split[0] is + // JSON name, if JSON name is not set, then split[0] is an empty string. + split := strings.SplitN(tag, ",", 2) + + name := split[0] + + // However it is possible that the field is skipped when + // (de-)serializing from/to JSON, in which case assume that there is no + // tag name to use + if name == "-" { + return "" + } + return name +} + +func prependPathToErrors(err error, path string) error { + switch err2 := err.(type) { + case Error: + err2.Path = append([]string{path}, err2.Path...) + return err2 + case Errors: + errors := err2.Errors() + for i, err3 := range errors { + errors[i] = prependPathToErrors(err3, path) + } + return err2 + } + return err +} + +// ValidateArray performs validation according to condition iterator that validates every element of the array +func ValidateArray(array []interface{}, iterator ConditionIterator) bool { + return Every(array, iterator) +} + +// ValidateMap use validation map for fields. +// result will be equal to `false` if there are any errors. +// s is the map containing the data to be validated. +// m is the validation map in the form: +// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} +func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + var errs Errors + var index int + val := reflect.ValueOf(s) + for key, value := range s { + presentResult := true + validator, ok := m[key] + if !ok { + presentResult = false + var err error + err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + valueField := reflect.ValueOf(value) + mapResult := true + typeResult := true + structResult := true + resultField := true + switch subValidator := validator.(type) { + case map[string]interface{}: + var err error + if v, ok := value.(map[string]interface{}); !ok { + mapResult = false + err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } else { + mapResult, err = ValidateMap(v, subValidator) + if err != nil { + mapResult = false + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + } + case string: + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + subValidator != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + } + resultField, err = typeCheck(valueField, reflect.StructField{ + Name: key, + PkgPath: "", + Type: val.Type(), + Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), + Offset: 0, + Index: []int{index}, + Anonymous: false, + }, val, nil) + if err != nil { + errs = append(errs, err) + } + case nil: + // already handlerd when checked before + default: + typeResult = 
false + err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + result = result && presentResult && typeResult && resultField && structResult && mapResult + index++ + } + // checks required keys + requiredResult := true + for key, value := range m { + if schema, ok := value.(string); ok { + tags := parseTagIntoMap(schema) + if required, ok := tags["required"]; ok { + if _, ok := s[key]; !ok { + requiredResult = false + if required.customErrorMessage != "" { + err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} + } else { + err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} + } + errs = append(errs, err) + } + } + } + } + + if len(errs) > 0 { + err = errs + } + return result && requiredResult, err +} + +// ValidateStruct use tags for fields. +// result will be equal to `false` if there are any errors. +// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) +func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = prependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// ValidateStructAsync performs async validation of the struct and returns results through the channels +func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer close(res) + defer close(errors) + + isValid, isFailed := ValidateStruct(s) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// ValidateMapAsync performs async validation of the map and returns results through the channels +func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer close(res) + defer close(errors) 
+ + isValid, isFailed := ValidateMap(s, m) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver checks if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsType checks if interface is of some type +func IsType(v interface{}, params ...string) bool { + if len(params) == 1 { + typ := params[0] + return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) + } + return false +} + +// IsTime checks if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsUnixTime checks if string is valid unix timestamp value +func IsUnixTime(str string) bool { + if _, err := strconv.Atoi(str); err == nil { + return true + } + return false +} + +// IsRFC3339 checks if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, rfc3339WithoutZone) +} + +// IsISO4217 checks if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength checks string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength checks string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub checks whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. 
+func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength checks string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// MinStringLength checks string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength checks string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + +// Range checks string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +// IsInRaw checks if string is in list of allowed values +func IsInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) + } + + return false +} + +// IsIn checks if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // checks if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, checks only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var 
customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // checks for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option checks the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // checks for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then checks its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return true, nil + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +// isEmptyValue checks whether value empty or not +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e := e.(type) { + case Error: + m[e.Name] = e.Err.Error() + case Errors: + for _, item := range e.Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } + +func IsE164(str string) bool { + return rxE164.MatchString(str) +} diff --git a/test/tools/vendor/github.com/asaskevich/govalidator/wercker.yml b/test/tools/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 0000000000..bc5f7b0864 --- /dev/null +++ b/test/tools/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race -v ./... diff --git a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile index 7181c5306f..b9fc4dfdb5 100644 --- a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile +++ b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile @@ -1,20 +1,14 @@ -ARG GO_VERSION=1.18 -ARG GO_IMAGE=golang:${GO_VERSION} +ARG GO_VERSION=1.21 -FROM --platform=$BUILDPLATFORM $GO_IMAGE AS build +FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS build COPY . /go/src/github.com/cpuguy83/go-md2man WORKDIR /go/src/github.com/cpuguy83/go-md2man -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT +ARG TARGETOS TARGETARCH TARGETVARIANT RUN \ - export GOOS="${TARGETOS}"; \ - export GOARCH="${TARGETARCH}"; \ - if [ "${TARGETARCH}" = "arm" ] && [ "${TARGETVARIANT}" ]; then \ - export GOARM="${TARGETVARIANT#v}"; \ - fi; \ - CGO_ENABLED=0 go build + --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + make build FROM scratch -COPY --from=build /go/src/github.com/cpuguy83/go-md2man/go-md2man /go-md2man +COPY --from=build /go/src/github.com/cpuguy83/go-md2man/bin/go-md2man /go-md2man ENTRYPOINT ["/go-md2man"] diff --git a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile index 437fc99979..5f4a423d6f 100644 --- a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile +++ b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/Makefile @@ -1,8 +1,34 @@ GO111MODULE ?= on -LINTER_BIN ?= golangci-lint export GO111MODULE +GOOS ?= $(if $(TARGETOS),$(TARGETOS),) +GOARCH ?= $(if $(TARGETARCH),$(TARGETARCH),) + +ifeq ($(TARGETARCH),amd64) +GOAMD64 ?= $(TARGETVARIANT) +endif + +ifeq ($(TARGETARCH),arm) +GOARM ?= $(TARGETVARIANT:v%=%) +endif + +ifneq ($(GOOS),) +export GOOS +endif + +ifneq ($(GOARCH),) +export GOARCH +endif + +ifneq ($(GOAMD64),) +export GOAMD64 +endif + +ifneq ($(GOARM),) +export GOARM +endif + .PHONY: build: bin/go-md2man @@ -14,22 +40,10 @@ clean: test: @go test $(TEST_FLAGS) ./... 
-bin/go-md2man: actual_build_flags := $(BUILD_FLAGS) -o bin/go-md2man -bin/go-md2man: bin - @CGO_ENABLED=0 go build $(actual_build_flags) - -bin: - @mkdir ./bin +bin/go-md2man: go.mod go.sum md2man/* *.go + @mkdir -p bin + CGO_ENABLED=0 go build $(BUILD_FLAGS) -o $@ .PHONY: mod mod: @go mod tidy - -.PHONY: check-mod -check-mod: # verifies that module changes for go.mod and go.sum are checked in - @hack/ci/check_mods.sh - -.PHONY: vendor -vendor: mod - @go mod vendor -v - diff --git a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 4b19188d90..8a290f1972 100644 --- a/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/test/tools/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -1,6 +1,7 @@ package md2man import ( + "bufio" "bytes" "fmt" "io" @@ -21,34 +22,35 @@ type roffRenderer struct { } const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB" - codespanCloseTag = "\\fR" - codeTag = "\n.EX\n" - codeCloseTag = "\n.EE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - dtTag = "\n.TP\n" - dd2Tag = "\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on). + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" + tablePreprocessor = `'\" t` ) // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents @@ -75,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions { // RenderHeader handles outputting the header at document start func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. 
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + // disable hyphenation out(w, ".nh\n") } @@ -322,6 +334,28 @@ func out(w io.Writer, output string) { } func escapeSpecialChars(w io.Writer, text []byte) { + scanner := bufio.NewScanner(bytes.NewReader(text)) + + // count the number of lines in the text + // we need to know this to avoid adding a newline after the last line + n := bytes.Count(text, []byte{'\n'}) + idx := 0 + + for scanner.Scan() { + dt := scanner.Bytes() + if idx < n { + idx++ + dt = append(dt, '\n') + } + escapeSpecialCharsLine(w, dt) + } + + if err := scanner.Err(); err != nil { + panic(err) + } +} + +func escapeSpecialCharsLine(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/.gitignore b/test/tools/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml b/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml new file mode 100644 index 0000000000..bfc421200d --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/test/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 0000000000..e028b46a9b --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/Makefile b/test/tools/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 0000000000..2d84889aed --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -v ./... + +generate: + go generate . 
+ +clean: + rm -rf *_generated*.go diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/README.md b/test/tools/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 0000000000..ddcecd13e7 --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) +[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. "capture ResponseWriter status code" you'll find +lots of advise and code examples that suggest it to be a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high change of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. 
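As a minimal illustration of that `Unwrap` escape hatch — an editorial sketch assuming a handler wrapped via `CaptureMetrics`, with hypothetical handler names — the pattern looks roughly like this:

```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// w is the writer wrapped by CaptureMetrics below. Unwrap peels off
		// any httpsnoop layers and returns the server's original writer.
		orig := httpsnoop.Unwrap(w)

		// Probe the original writer for an optional interface that a custom
		// wrapper stacked on top of httpsnoop might otherwise hide.
		if f, ok := orig.(http.Flusher); ok {
			f.Flush()
		}
		w.Write([]byte("ok"))
	})

	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m := httpsnoop.CaptureMetrics(inner, w, r)
		log.Printf("code=%d dt=%s written=%d", m.Code, m.Duration, m.Written)
	})

	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```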
+ +However, hopefully the explanation above has sufficiently scared you of rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefor it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 0000000000..b77cc7c009 --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. + Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlaying connection directly (e.g. headers), but those + // are not tracked. Therefor the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + m := Metrics{Code: http.StatusOK} + m.CaptureMetrics(w, fn) + return m +} + +// CaptureMetrics wraps w and calls fn with the wrapped w and updates +// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, +// but allows one to customize starting Metrics object. 
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/docs.go b/test/tools/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 0000000000..203c35b3c6 --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 0000000000..31cbdfb8ef --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 0000000000..ab99c07c7a --- /dev/null +++ b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.editorconfig b/test/tools/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 0000000000..fad895851e --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 0000000000..32f1001be0 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 0000000000..1d89d85ce4 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# go test -c output +*.test +*.test.exe + +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.mailmap b/test/tools/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 0000000000..a04f2907fe --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 0000000000..77f9593bd5 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,470 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. 
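To put the new `Event.Has()` helper above in context, a minimal watcher loop — a sketch assuming fsnotify v1.6+ and a hypothetical watch path — might look like:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Hypothetical path; watch whichever directory you care about.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Event.Has/Op.Has replace manual bit-masking on event.Op.
			if event.Has(fsnotify.Write) && !event.Has(fsnotify.Remove) {
				log.Println("modified:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```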
+ +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + +## [1.4.7] - 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## [1.4.2] - 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec 
[#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## [1.4.1] - 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## [1.4.0] - 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## [1.3.1] - 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## [1.3.0] - 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## [1.2.10] - 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## [1.2.9] - 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## [1.2.8] - 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## [1.2.5] - 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## [1.2.1] - 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## [1.2.0] - 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## [1.1.1] - 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## [1.1.0] - 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [1.0.4] - 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. 
[#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## [1.0.3] - 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## [1.0.2] - 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## [1.0.0] - 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## [0.9.3] - 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [0.9.2] - 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## [0.9.1] - 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## [0.9.0] - 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## [0.8.12] - 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## [0.8.11] - 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## [0.8.10] - 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## [0.8.9] - 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## [0.8.8] - 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## [0.8.7] - 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## [0.8.6] - 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## [0.8.5] - 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## [0.8.4] - 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## [0.8.3] - 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## [0.8.2] - 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## [0.8.1] - 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## [0.8.0] - 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## [0.7.4] - 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## [0.7.3] - 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## [0.7.2] - 2012-09-01 + +* kqueue: events for created directories + +## [0.7.1] - 2012-07-14 + +* [Fix] for renaming files + +## [0.7.0] - 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## [0.6.0] - 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## [0.5.1] - 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## [0.5.0] - 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## [0.4.0] - 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## [0.3.0] - 2012-02-19 + +* kqueue: add files when watch directory 
+ +## [0.2.0] - 2011-12-30 + +* update to latest Go weekly code + +## [0.1.0] - 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/test/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 0000000000..ea379759d5 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,26 @@ +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: + +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. + +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. + +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. + +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. + +Use the `-short` flag to make the "stress test" run faster. + + +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/LICENSE b/test/tools/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 0000000000..fb03ade750 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,25 @@ +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. 
nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/README.md b/test/tools/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 0000000000..d4e6080feb --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,161 @@ +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. + +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify + +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** + +--- + +Platform support: + +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +Linux and macOS should include Android and iOS, but these are currently untested. + +Usage +----- +A basic example: + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) +} +``` + +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: + + % go run ./cmd/fsnotify + +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. + +### Are subdirectories watched too? 
+No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). + +[#18]: https://github.com/fsnotify/fsnotify/issues/18 + +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). + +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. + +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. + +[#9]: https://github.com/fsnotify/fsnotify/issues/9 + +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: + + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE + +This is the event that inotify sends, so not much can be changed about this. + +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". + +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` + +To increase them you can use `sysctl` or write the value to proc file: + + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 + +Reaching the limit will result in a "no space left on device" or "too many open +files" error. + +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. + +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. + +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 0000000000..1a95ad8e7c --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). 
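+//
+// For example (an illustrative sketch; handleEvents is a hypothetical helper
+// that consumes w.Events and w.Errors): share a single *Watcher between
+// goroutines rather than copying the struct value:
+//
+//    w, err := fsnotify.NewWatcher()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    defer w.Close()
+//    go handleEvents(w) // pass the pointer; never copy *w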
+// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). 
+ // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 0000000000..54c77fbb0e --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. 
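+//
+// A caller that needs a prompt "file is gone" signal can, as a workaround,
+// stat the path when a Chmod arrives (an illustrative sketch, not a
+// documented guarantee of this package):
+//
+//    for e := range w.Events {
+//        if e.Has(fsnotify.Chmod) {
+//            if _, err := os.Stat(e.Name); os.IsNotExist(err) {
+//                // The last link to the path is gone; treat it as a Remove.
+//            }
+//        }
+//    }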
+// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. 
+ Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. 
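+//
+// (A typical sequence is: create "file.tmp", write the new contents, then
+// rename "file.tmp" over "file"; the watch stays attached to the old,
+// now-unlinked file.)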
+// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
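+//
+// The returned slice is a newly allocated copy of the internal path set;
+// entries are in no particular order (map iteration order).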
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
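+//
+// Mask bits map to Ops as follows (see inotify(7)): IN_CREATE and IN_MOVED_TO
+// become Create; IN_DELETE and IN_DELETE_SELF become Remove; IN_MODIFY becomes
+// Write; IN_MOVE_SELF and IN_MOVED_FROM become Rename; IN_ATTRIB becomes Chmod.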
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 0000000000..29087469bf --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. 
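+//
+// (This is the classic self-pipe pattern: the read end of the pipe is
+// registered for EVFILT_READ, so closing the write end in Close() wakes the
+// kevent() call that readEvents is blocked in.)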
+func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. 
For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. 
+ // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 0000000000..a9bb1c3c4d --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 0000000000..ae392867c0 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
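
The Events and Errors channels documented above are normally drained in a single select loop, filtering by Event.Name and checking operations with Event.Has as the comments recommend. A minimal consumer sketch, with a placeholder watch path and trimmed error handling, could look like this:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the parent directory rather than an individual file, then
	// filter events by name, as the package documentation suggests.
	if err := w.Add("/some/dir"); err != nil { // placeholder path
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // channel closed by Close()
				return
			}
			// Op is a bitmask; use Has() rather than comparing with ==.
			if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
				log.Println("changed:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```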
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 0000000000..30a5bf0f07 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,81 @@ +//go:build !plan9 +// +build !plan9 + +// Package fsnotify provides a cross-platform interface for file system +// notifications. +package fsnotify + +import ( + "errors" + "fmt" + "strings" +) + +// Event represents a file system notification. +type Event struct { + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op +} + +// Op describes a set of file operations. +type Op uint32 + +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. 
+const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) + +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") + } + if op.Has(Remove) { + b.WriteString("|REMOVE") + } + if op.Has(Write) { + b.WriteString("|WRITE") + } + if op.Has(Rename) { + b.WriteString("|RENAME") + } + if op.Has(Chmod) { + b.WriteString("|CHMOD") + } + if b.Len() == 0 { + return "[no events]" + } + return b.String()[1:] +} + +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. +func (e Event) String() string { + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) +} diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 0000000000..b09ef76834 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/test/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go new file mode 100644 index 0000000000..4322b0b885 --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -0,0 +1,8 @@ +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/test/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go new file mode 100644 index 0000000000..5da5ffa78f --- /dev/null +++ b/test/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -0,0 +1,9 @@ +//go:build darwin +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/test/tools/vendor/github.com/go-openapi/analysis/.codecov.yml b/test/tools/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 0000000000..841c4281e2 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/test/tools/vendor/github.com/go-openapi/analysis/.gitattributes 
b/test/tools/vendor/github.com/go-openapi/analysis/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/test/tools/vendor/github.com/go-openapi/analysis/.gitignore b/test/tools/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 0000000000..87c3bd3e66 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/test/tools/vendor/github.com/go-openapi/analysis/.golangci.yml b/test/tools/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 0000000000..e24a6c14e6 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,56 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 40 + gocognit: + min-complexity: 40 + maligned: + suggest-new: true + dupl: + threshold: 150 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + # scopelint is useful, but also reports false positives + # that unfortunately can't be disabled. So we disable the + # linter rather than changing code that works. + # see: https://github.com/kyoh86/scopelint/issues/4 + - scopelint + - godox + - gocognit + #- whitespace + - wsl + - funlen + - testpackage + - wrapcheck + #- nlreturn + - gomnd + - goerr113 + - exhaustivestruct + #- errorlint + #- nestif + - gofumpt + - godot + - gci + - dogsled + - paralleltest + - tparallel + - thelper + - ifshort + - forbidigo + - cyclop + - varnamelen + - exhaustruct + - nonamedreturns + - nosnakecase diff --git a/test/tools/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/google.golang.org/appengine/LICENSE b/test/tools/vendor/github.com/go-openapi/analysis/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to test/tools/vendor/github.com/go-openapi/analysis/LICENSE diff --git a/test/tools/vendor/github.com/go-openapi/analysis/README.md b/test/tools/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 0000000000..aad6da10fe --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,31 @@ +# OpenAPI initiative analysis + +[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) +[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) +[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. + +## What's inside? + +* A analyzer providing methods to walk the functional content of a specification +* A spec flattener producing a self-contained document bundle, while preserving `$ref`s +* A spec merger ("mixin") to merge several spec documents into a primary spec +* A spec "fixer" ensuring that response descriptions are non empty + +[Documentation](https://godoc.org/github.com/go-openapi/analysis) + +## FAQ + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. +> diff --git a/test/tools/vendor/github.com/go-openapi/analysis/analyzer.go b/test/tools/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 0000000000..c17aee1b61 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,1064 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
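
As a rough illustration of how the analyzer described in the README is typically consumed: the Swagger 2.0 document is decoded into a *spec.Swagger and handed to analysis.New, which returns the indexed Spec with the query methods defined in analyzer.go below. The input file name here is hypothetical, and decoding is sketched with plain JSON unmarshalling rather than any particular loader:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	raw, err := os.ReadFile("swagger.json") // hypothetical spec file
	if err != nil {
		log.Fatal(err)
	}

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// Build the analyzed registry over the decoded document.
	an := analysis.New(&doc)

	// A few of the indices the analyzer exposes (see analyzer.go below).
	fmt.Println("operation IDs:", an.OperationIDs())
	fmt.Println("consumes:", an.RequiredConsumes())
	for path := range an.AllPaths() {
		fmt.Println("path:", path)
	}
}
```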
+ +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. + r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. 
+func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. +type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) //#nosec + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", response.Schema, refPref) + } + 
} + + for name := range s.spec.Definitions { + schema := s.spec.Definitions[name] + s.analyzeSchema(name, &schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + s.analyzeParameter(prefix, i, param) + } + + if op.Responses == nil { + return + } + + if op.Responses.Default != nil { + s.analyzeDefaultResponse(prefix, op.Responses.Default) + } + + for k, res := range op.Responses.StatusCodeResponses { + s.analyzeResponse(prefix, k, res) + } +} + +func (s *Spec) analyzeDefaultResponse(prefix 
string, res *spec.Response) { + refPref := slashpath.Join(prefix, "responses", "default") + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, res) + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) //#nosec + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) + } + + for k, v := range schema.Properties { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) + } + + for k, v := range schema.PatternProperties { + v := v + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) + } + + for i := range schema.AllOf { + v := &schema.AllOf[i] + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + + for i := range schema.AnyOf { + v := &schema.AnyOf[i] + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + + for i := range schema.OneOf { + v := &schema.OneOf[i] + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", schema.Not, refURI) + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", schema.Items.Schema, refURI) + } + + for i := range schema.Items.Schemas { + sch := &schema.Items.Schemas[i] + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + + continue + } + + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + + result = append(result, reqs) + } + + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) 
string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() == "" { + res[mapKeyFromParam(&pr)] = pr + + continue + } + + // resolve $ref + if callmeOnError == nil { + callmeOnError = func(_ spec.Parameter, err error) bool { + panic(err) + } + } + + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + + break + } + + objAsParam, ok := obj.(spec.Parameter) + if !ok { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + + break + } + + pr = objAsParam + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + + return res + } + + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) //#nosec + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) //#nosec + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) //#nosec + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) //#nosec + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) //#nosec + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) //#nosec + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) //#nosec + } + } + + return nil +} + +// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { + return s.SafeParamsFor(method, path, nil) +} + +// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { + res := make(map[string]spec.Parameter) + if pi, ok := s.spec.Paths.Paths[path]; ok { + s.paramsAsMap(pi.Parameters, res, callmeOnError) + s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) + } + + return res +} + +// OperationForName gets the operation for the given id +func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { + for method, pathItem := range s.operations { + for path, op := range pathItem { + if operationID == op.ID { + return method, path, op, true + } + } + } + + return "", "", nil, false +} + +// OperationFor the given method and path +func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { + if mp, ok := s.operations[strings.ToUpper(method)]; ok { + op, fn := mp[path] + + return op, fn + } + + return nil, false +} + +// Operations gathers all the operations specified in the spec document +func (s *Spec) Operations() map[string]map[string]*spec.Operation { + return s.operations +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + + return result +} + +// AllPaths returns all the paths in the swagger spec +func (s *Spec) AllPaths() map[string]spec.PathItem { + if s.spec == nil || s.spec.Paths == nil { + return nil + } + + return s.spec.Paths.Paths +} + +// OperationIDs gets all the operation ids based on method an dpath +func (s *Spec) OperationIDs() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p, o := range v { + if o.ID != "" { + result = append(result, o.ID) + } else { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + } + + return result +} + +// OperationMethodPaths gets all the operation ids based on method an dpath +func (s *Spec) OperationMethodPaths() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p := range v { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + + return result +} + +// RequiredConsumes gets all the distinct consumes that are specified in the specification document +func (s *Spec) RequiredConsumes() []string { + return s.structMapKeys(s.consumes) +} + +// RequiredProduces gets all the distinct produces that are specified in the specification document +func (s *Spec) RequiredProduces() []string { + return 
s.structMapKeys(s.produces) +} + +// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +func (s *Spec) RequiredSecuritySchemes() []string { + return s.structMapKeys(s.authSchemes) +} + +// SchemaRef is a reference to a schema +type SchemaRef struct { + Name string + Ref spec.Ref + Schema *spec.Schema + TopLevel bool +} + +// SchemasWithAllOf returns schema references to all schemas that are defined +// with an allOf key +func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { + for _, v := range s.allOfs { + result = append(result, v) + } + + return +} + +// AllDefinitions returns schema references for all the definitions that were discovered +func (s *Spec) AllDefinitions() (result []SchemaRef) { + for _, v := range s.allSchemas { + result = append(result, v) + } + + return +} + +// AllDefinitionReferences returns json refs for all the discovered schemas +func (s *Spec) AllDefinitionReferences() (result []string) { + for _, v := range s.references.schemas { + result = append(result, v.String()) + } + + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + + return +} + +// AllPathItemReferences returns the references for all the items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + + return +} + +// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). +// +// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid +// Swagger 2.0 spec. 
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/test/tools/vendor/github.com/go-openapi/analysis/appveyor.yml similarity index 100% rename from vendor/github.com/go-openapi/analysis/appveyor.yml rename to test/tools/vendor/github.com/go-openapi/analysis/appveyor.yml 
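The hunk above completes the vendored analyzer API: operation, parameter, consumes/produces, security, pattern and enum accessors on analysis.Spec. For reviewers unfamiliar with the upstream package, here is a minimal, hypothetical usage sketch (not part of the vendored change); the inline Swagger document and the "listPets" operation id are invented for illustration, and the values in the comments are only what one would expect for that toy document.

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// A tiny, made-up Swagger 2.0 document (illustration only).
	raw := []byte(`{
	  "swagger": "2.0",
	  "info": {"title": "demo", "version": "1.0.0"},
	  "consumes": ["application/json"],
	  "paths": {
	    "/pets": {
	      "get": {
	        "operationId": "listPets",
	        "responses": {"200": {"description": "ok"}}
	      }
	    }
	  }
	}`)

	var sw spec.Swagger
	if err := sw.UnmarshalJSON(raw); err != nil {
		log.Fatal(err)
	}

	// analysis.New indexes the document: operations, $refs, patterns, enums, ...
	an := analysis.New(&sw)

	fmt.Println(an.OperationIDs())     // expected: [listPets]
	fmt.Println(an.RequiredConsumes()) // expected: [application/json]

	if method, pth, _, ok := an.OperationForName("listPets"); ok {
		fmt.Println(method, pth) // expected: GET /pets
	}

	// Aggregated parameters for a method and path (none in this toy spec).
	fmt.Println(len(an.ParamsFor("GET", "/pets"))) // expected: 0
}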
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/debug.go b/test/tools/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 0000000000..33c15704ec --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "os" + + "github.com/go-openapi/analysis/internal/debug" +) + +var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/test/tools/vendor/github.com/go-openapi/analysis/doc.go b/test/tools/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 0000000000..d5294c0950 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package analysis provides methods to work with a Swagger specification document from +package go-openapi/spec. + +Analyzing a specification + +An analysed specification object (type Spec) provides methods to work with swagger definition. + +Flattening or expanding a specification + +Flattening a specification bundles all remote $ref in the main spec document. +Depending on flattening options, additional preprocessing may take place: + - full flattening: replacing all inline complex constructs by a named entry in #/definitions + - expand: replace all $ref's in the document by their expanded content + +Merging several specifications + +Mixin several specifications merges all Swagger constructs, and warns about found conflicts. + +Fixing a specification + +Unmarshalling a specification with golang json unmarshalling may lead to +some unwanted result on present but empty fields. + +Analyzing a Swagger schema + +Swagger schemas are analyzed to determine their complexity and qualify their content. +*/ +package analysis diff --git a/test/tools/vendor/github.com/go-openapi/analysis/fixer.go b/test/tools/vendor/github.com/go-openapi/analysis/fixer.go new file mode 100644 index 0000000000..7c2ca08416 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/fixer.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import "github.com/go-openapi/spec" + +// FixEmptyResponseDescriptions replaces empty ("") response +// descriptions in the input with "(empty)" to ensure that the +// resulting Swagger is stays valid. The problem appears to arise +// from reading in valid specs that have a explicit response +// description of "" (valid, response.description is required), but +// due to zero values being omitted upon re-serializing (omitempty) we +// lose them unless we stick some chars in there. +func FixEmptyResponseDescriptions(s *spec.Swagger) { + for k, v := range s.Responses { + FixEmptyDesc(&v) //#nosec + s.Responses[k] = v + } + + if s.Paths == nil { + return + } + + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) //#nosec + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input. +func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 0000000000..0576220fb3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,802 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
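The fixer.go file above adds FixEmptyResponseDescriptions and its helpers. As a quick, hypothetical illustration of what the fixer does (a sketch, not part of the vendored change), the following builds an in-memory response with an empty description and shows it being rewritten to "(empty)":

package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// Hypothetical in-memory spec with a shared response whose description is empty.
	sw := &spec.Swagger{
		SwaggerProps: spec.SwaggerProps{
			Responses: map[string]spec.Response{
				"NotFound": {}, // description intentionally left blank
			},
		},
	}

	// Replaces empty descriptions with "(empty)" so the document stays valid
	// when re-serialized (description is required, but is dropped by omitempty).
	analysis.FixEmptyResponseDescriptions(sw)

	fmt.Println(sw.Responses["NotFound"].Description) // expected: (empty)
}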
+ +package analysis + +import ( + "fmt" + "log" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +// newRef stores information about refs created during the flattening process +type newRef struct { + key string + newName string + path string + isOAIGen bool + resolved bool + schema *spec.Schema + parents []string +} + +// context stores intermediary results from flatten +type context struct { + newRefs map[string]*newRef + warnings []string + resolved map[string]string +} + +func newContext() *context { + return &context{ + newRefs: make(map[string]*newRef, 150), + warnings: make([]string, 0), + resolved: make(map[string]string, 50), + } +} + +// Flatten an analyzed spec and produce a self-contained spec bundle. +// +// There is a minimal and a full flattening mode. +// +// +// Minimally flattening a spec means: +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") +// +// A minimally flattened spec thus guarantees the following properties: +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique +// +// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they +// represent a complex schema or express commonality in the spec. +// Otherwise, they are simply expanded. +// Self-referencing JSON pointers cannot resolve to a type and trigger an error. +// +// +// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. +// +// Fully flattening a spec means: +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// +// By complex, we mean every JSON object with some properties. +// Arrays, when they do not define a tuple, +// or empty objects with or without additionalProperties, are not considered complex and remain inline. +// +// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions +// have been created. +// +// Available flattening options: +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// +// NOTE: expansion removes all $ref save circular $ref, which remain in place +// +// TODO: additional options +// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ... 
+// +func Flatten(opts FlattenOpts) error { + debugLog("FlattenOpts: %#v", opts) + + opts.flattenContext = newContext() + + // 1. Recursively expand responses, parameters, path items and items in simple schemas. + // + // This simplifies the spec and leaves only the $ref's in schema objects. + if err := expand(&opts); err != nil { + return err + } + + // 2. Strip the current document from absolute $ref's that actually a in the root, + // so we can recognize them as proper definitions + // + // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped + if err := normalizeRef(&opts); err != nil { + return err + } + + // 3. Optionally remove shared parameters and responses already expanded (now unused). + // + // Operation parameters (i.e. under paths) remain. + if opts.RemoveUnused { + removeUnusedShared(&opts) + } + + // 4. Import all remote references. + if err := importReferences(&opts); err != nil { + return err + } + + // 5. full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) + if !opts.Minimal && !opts.Expand { + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + } + + // 6. Rewrite JSON pointers other than $ref to named definitions + // and attempt to resolve conflicting names whenever possible. + if err := stripPointersAndOAIGen(&opts); err != nil { + return err + } + + // 7. Strip the spec from unused definitions + if opts.RemoveUnused { + removeUnused(&opts) + } + + // 8. Issue warning notifications, if any + opts.croak() + + // TODO: simplify known schema patterns to flat objects with properties + // examples: + // - lift simple allOf object, + // - empty allOf with validation only or extensions only + // - rework allOf arrays + // - rework allOf additionalProperties + + return nil +} + +func expand(opts *FlattenOpts) error { + if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { + return err + } + + opts.Spec.reload() // re-analyze + + return nil +} + +// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + + altered := false + for k, w := range opts.Spec.references.allRefs { + if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + continue + } + + altered = true + debugLog("stripping absolute path for: %s", w.String()) + + // strip the base path from definition + if err := replace.UpdateRef(opts.Swagger(), k, + spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { + return err + } + } + + if altered { + opts.Spec.reload() // re-analyze + } + + return nil +} + +func removeUnusedShared(opts *FlattenOpts) { + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + + opts.Spec.reload() // re-analyze +} + +func importReferences(opts *FlattenOpts) error { + var ( + imported bool + err error + ) + + for !imported && err == nil { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + imported, err = importExternalReferences(opts) + + opts.Spec.reload() // re-analyze + } + + return err +} + +// nameInlinedSchemas replaces every complex inline construct by a named definition. 
+func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { + continue + } + + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, err) + } + + if asch.isAnalyzedAsComplex() { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func removeUnused(opts *FlattenOpts) { + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + + for k := range expected { + debugLog("removing unused definition %s", path.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", path.Base(k)) + } + delete(opts.Swagger().Definitions, path.Base(k)) + } + + opts.Spec.reload() // re-analyze +} + +func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + return nil +} + +func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { + var ( + isOAIGen bool + newName string + ) + + debugLog("resolving schema from remote $ref [%s]", refStr) + + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return fmt.Errorf("could not resolve schema: %w", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + 
resolved = opts.flattenContext.newRefs[key].resolved + } + + debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + schutils.Save(opts.Swagger(), newName, sch) + + return nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. +func importExternalReferences(opts *FlattenOpts) (bool, error) { + debugLog("importExternalReferences") + + groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) + sortedRefStr := make([]string, 0, len(groupedRefs)) + if opts.flattenContext == nil { + opts.flattenContext = newContext() + } + + // sort $ref resolution to ensure deterministic name conflict resolution + for refStr := range groupedRefs { + sortedRefStr = append(sortedRefStr, refStr) + } + sort.Strings(sortedRefStr) + + complete := true + + for _, refStr := range sortedRefStr { + entry := groupedRefs[refStr] + if entry.Ref.HasFragmentOnly { + continue + } + + complete = false + + newName := opts.flattenContext.resolved[refStr] + if newName != "" { + if err := importKnownRef(entry, refStr, newName, opts); err != nil { + return false, err + } + + continue + } + + // resolve schemas + if err := importNewRef(entry, refStr, opts); err != nil { + return false, err + } + } + + // maintains ref index entries + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + + // update tracking with resolved schemas + if r.schema.Ref.String() != "" { + ref := spec.MustCreateRef(r.path) + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %w", err) + } + + r.schema = sch + } + + if r.path == k { + continue + } + + // update tracking with renamed keys: got a cascade of refs + renamed := *r + renamed.key = r.path + opts.flattenContext.newRefs[renamed.path] = &renamed + + // indirect ref + r.newName = path.Base(k) + r.schema = spec.RefSchema(r.path) + r.path = k + r.isOAIGen = strings.Contains(k, "OAIGen") + } + + return complete, nil +} + +// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. +// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. 
+func stripPointersAndOAIGen(opts *FlattenOpts) error { + // name all JSON pointers to anonymous documents + if err := namePointers(opts); err != nil { + return err + } + + // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) + hasIntroducedPointerOrInline, ers := stripOAIGen(opts) + if ers != nil { + return ers + } + + // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers + for hasIntroducedPointerOrInline { + if !opts.Minimal { + opts.Spec.reload() // re-analyze + if err := nameInlinedSchemas(opts); err != nil { + return err + } + } + + if err := namePointers(opts); err != nil { + return err + } + + // restrip and re-analyze + var err error + if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { + return err + } + } + + return nil +} + +// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. +// +// A dedupe is deemed unnecessary whenever: +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to +// the first parent. +// +// This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate +// pointer and name resolution again. +func stripOAIGen(opts *FlattenOpts) (bool, error) { + debugLog("stripOAIGen") + replacedWithComplex := false + + // figure out referers of OAIGen definitions (doing it before the ref start mutating) + for _, r := range opts.flattenContext.newRefs { + updateRefParents(opts.Spec.references.allRefs, r) + } + + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", + k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) + + if !r.isOAIGen || len(r.parents) == 0 { + continue + } + + hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) + if err != nil { + return replacedWithComplex, err + } + + replacedWithComplex = replacedWithComplex || hasReplacedWithComplex + } + + debugLog("replacedWithComplex: %t", replacedWithComplex) + opts.Spec.reload() // re-analyze + + return replacedWithComplex, nil +} + +// updateRefParents updates all parents of an updated $ref +func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { + if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) + return + } + for k, v := range allRefs { + if r.path != v.String() { + continue + } + + found := false + for _, p := range r.parents { + if p == k { + found = true + + break + } + } + if !found { + r.parents = append(r.parents, k) + } + } +} + +func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { + replacedWithComplex := false + + pr := sortref.TopmostFirst(r.parents) + + // rewrite first parent schema in hierarchical then lexicographical order + debugLog("rewrite first parent %s with schema", pr[0]) + if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { + // update parent in ref index entry + debugLog("update parent entry: %s", pr[0]) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + + // rewrite other parents to point to first parent + if len(pr) > 1 { + for 
_, p := range pr[1:] { + replacingRef := spec.MustCreateRef(pr[0]) + + // set complex when replacing ref is an anonymous jsonpointer: further processing may be required + replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath + debugLog("rewrite parent with ref: %s", replacingRef.String()) + + // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). + // Those are stripped later on. + if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { + // update parent in ref index + debugLog("update parent entry: %s", p) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + } + } + + // remove OAIGen definition + debugLog("removing definition %s", path.Base(r.path)) + delete(opts.Swagger().Definitions, path.Base(r.path)) + + // propagate changes in ref index for keys which have this one as a parent + for kk, value := range opts.flattenContext.newRefs { + if kk == k || !value.isOAIGen || value.resolved { + continue + } + + found := false + newParents := make([]string, 0, len(value.parents)) + for _, parent := range value.parents { + switch { + case parent == r.path: + found = true + parent = pr[0] + case strings.HasPrefix(parent, r.path+"/"): + found = true + parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) + } + + newParents = append(newParents, parent) + } + + if found { + value.parents = newParents + } + } + + // mark naming conflict as resolved + debugLog("marking naming conflict resolved for key: %s", r.key) + opts.flattenContext.newRefs[r.key].isOAIGen = false + opts.flattenContext.newRefs[r.key].resolved = true + + // determine if the previous substitution did inline a complex schema + if r.schema != nil && r.schema.Ref.String() == "" { // inline schema + asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return false, err + } + + debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) + replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() + } + + return replacedWithComplex, nil +} + +// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions. +// +// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. +// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). +func namePointers(opts *FlattenOpts) error { + debugLog("name pointers") + + refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) + for k, ref := range opts.Spec.references.allRefs { + if path.Dir(ref.String()) == definitionsPath { + // this a ref to a top-level definition: ok + continue + } + + result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) + if err != nil { + return fmt.Errorf("at %s, %w", k, err) + } + + replacingRef := result.Ref + sch := result.Schema + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) 
+ } + + debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) + refsToReplace[k] = SchemaRef{ + Name: k, // caller + Ref: replacingRef, // called + Schema: sch, + TopLevel: path.Dir(replacingRef.String()) == definitionsPath, + } + } + + depthFirst := sortref.DepthFirst(refsToReplace) + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + for _, key := range depthFirst { + v := refsToReplace[key] + // update current replacement, which may have been updated by previous changes of deeper elements + result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) + if erd != nil { + return fmt.Errorf("at %s, %w", key, erd) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) + } + + v.Ref = result.Ref + v.Schema = result.Schema + v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath + debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) + + if v.TopLevel { + debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) + + // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref + if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { + return err + } + + continue + } + + if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { + return err + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { + // this is a JSON pointer to an anonymous document (internal or external): + // create a definition for this schema when: + // - it is a complex schema + // - or it is pointed by more than one $ref (i.e. expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, ers) + } + callers := make([]string, 0, 64) + + debugLog("looking for callers") + + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) + if err != nil { + return fmt.Errorf("at %s, %w", key, err) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
+ } + + if r.Ref.String() == v.Ref.String() { + callers = append(callers, k) + } + } + + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + return nil + } + + parts := sortref.KeyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller == key { + continue + } + + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + + return nil + } + + debugLog("expand JSON pointer for key=%s", key) + + if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go new file mode 100644 index 0000000000..3ad2ccfbfd --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -0,0 +1,293 @@ +package analysis + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// InlineSchemaNamer finds a new name for an inlined type +type InlineSchemaNamer struct { + Spec *spec.Swagger + Operations map[string]operations.OpRef + flattenContext *context + opts *FlattenOpts +} + +// Name yields a new name for the inline schema +func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { + debugLog("naming inlined schema at %s", key) + + parts := sortref.KeyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name == "" { + continue + } + + // create unique name + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) + + // clone schema + sch := schutils.Clone(schema) + + // replace values on schema + if err := replace.RewriteSchemaToRef(isn.Spec, key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) + } + + // rewrite any dependent $ref pointing to this place, + // when not already pointing to a top-level definition. + // + // NOTE: this is important if such referers use arbitrary JSON pointers. 
+ an := New(isn.Spec) + for k, v := range an.references.allRefs { + r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v) + if erd != nil { + return fmt.Errorf("at %s, %w", k, erd) + } + + if isn.opts.flattenContext != nil { + isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...) + } + + if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) { + continue + } + + debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) + + // rewrite $ref to the new target + if err := replace.UpdateRef(isn.Spec, k, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + // NOTE: this extension is currently not used by go-swagger (provided for information only) + sch.AddExtension("x-go-gen-location", GenLocation(parts)) + + // save cloned schema to definitions + schutils.Save(isn.Spec, newName, sch) + + // keep track of created refs + if isn.flattenContext == nil { + continue + } + + debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) + resolved := false + + if _, ok := isn.flattenContext.newRefs[key]; ok { + resolved = isn.flattenContext.newRefs[key].resolved + } + + isn.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + return nil +} + +// uniqifyName yields a unique name for a definition +func uniqifyName(definitions spec.Definitions, name string) (string, bool) { + isOAIGen := false + if name == "" { + name = "oaiGen" + isOAIGen = true + } + + if len(definitions) == 0 { + return name, isOAIGen + } + + unq := true + for k := range definitions { + if strings.EqualFold(k, name) { + unq = false + + break + } + } + + if unq { + return name, isOAIGen + } + + name += "OAIGen" + isOAIGen = true + var idx int + unique := name + _, known := definitions[unique] + + for known { + idx++ + unique = fmt.Sprintf("%s%d", name, idx) + _, known = definitions[unique] + } + + return unique, isOAIGen +} + +func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string { + var ( + baseNames [][]string + startIndex int + ) + + if parts.IsOperation() { + baseNames, startIndex = namesForOperation(parts, operations) + } + + // definitions + if parts.IsDefinition() { + baseNames, startIndex = namesForDefinition(parts) + } + + result := make([]string, 0, len(baseNames)) + for _, segments := range baseNames { + nm := parts.BuildName(segments, startIndex, partAdder(aschema)) + if nm == "" { + continue + } + + result = append(result, nm) + } + sort.Strings(result) + + return result +} + +func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { + var ( + baseNames [][]string + startIndex int + ) + + piref := parts.PathItemRef() + if piref.String() != "" && parts.IsOperationParam() { + if op, ok := operations[piref.String()]; ok { + startIndex = 5 + baseNames = append(baseNames, []string{op.ID, "params", "body"}) + } + } else if parts.IsSharedOperationParam() { + pref := parts.PathRef() + for k, v := range operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) 
([][]string, int) { + var ( + baseNames [][]string + startIndex int + ) + + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + baseNames, startIndex = namesForParam(parts, operations) + } + + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { + nm := parts.DefinitionName() + if nm != "" { + return [][]string{{parts.DefinitionName()}}, 2 + } + + return [][]string{}, 0 +} + +// partAdder knows how to interpret a schema when it comes to build a name from parts +func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { + return func(part string) []string { + segments := make([]string, 0, 2) + + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + segments = append(segments, "items") + } + + if part == "additionalItems" { + segments = append(segments, part) + } + + return segments + } + + segments = append(segments, part) + + return segments + } +} + +func nameFromRef(ref spec.Ref) string { + u := ref.GetURL() + if u.Fragment != "" { + return swag.ToJSONName(path.Base(u.Fragment)) + } + + if u.Path != "" { + bn := path.Base(u.Path) + if bn != "" && bn != "/" { + ext := path.Ext(bn) + if ext != "" { + return swag.ToJSONName(bn[:len(bn)-len(ext)]) + } + + return swag.ToJSONName(bn) + } + } + + return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " ")) +} + +// GenLocation indicates from which section of the specification (models or operations) a definition has been created. +// +// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided +// for information only. +func GenLocation(parts sortref.SplitKey) string { + switch { + case parts.IsOperation(): + return "operations" + case parts.IsDefinition(): + return "models" + default: + return "" + } +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go new file mode 100644 index 0000000000..c5bb97b0a6 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -0,0 +1,78 @@ +package analysis + +import ( + "log" + + "github.com/go-openapi/spec" +) + +// FlattenOpts configuration for flattening a swagger specification. +// +// The BasePath parameter is used to locate remote relative $ref found in the specification. +// This path is a file: it points to the location of the root document and may be either a local +// file path or a URL. +// +// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") +// found in the spec are searched from the current working directory. 
+type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string // The location of the root document for this spec to resolve relative $ref + + // Flattening options + Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) + Minimal bool // When true, do not decompose complex structures such as allOf + Verbose bool // enable some reporting on possible name conflicts detected + RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening + ContinueOnError bool // Continue when spec expansion issues are found + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { + return &spec.ExpandOptions{ + RelativeBase: f.BasePath, + SkipSchemas: skipSchemas, + ContinueOnError: f.ContinueOnError, + } +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *spec.Swagger { + return f.Spec.spec +} + +// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting +// from flattening a spec +func (f *FlattenOpts) croak() { + if !f.Verbose { + return + } + + reported := make(map[string]bool, len(f.flattenContext.newRefs)) + for _, v := range f.Spec.references.allRefs { + // warns about duplicate handling + for _, r := range f.flattenContext.newRefs { + if r.isOAIGen && r.path == v.String() { + reported[r.newName] = true + } + } + } + + for k := range reported { + log.Printf("warning: duplicate flattened definition name resolved as %s", k) + } + + // warns about possible type mismatches + uniqueMsg := make(map[string]bool) + for _, msg := range f.flattenContext.warnings { + if _, ok := uniqueMsg[msg]; ok { + continue + } + log.Printf("warning: %s", msg) + uniqueMsg[msg] = true + } +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go new file mode 100644 index 0000000000..ec0fec0229 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
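The flatten_options.go file above defines FlattenOpts, which drives the Flatten entry point added in flatten.go. A minimal, hypothetical sketch of how the two fit together follows (assumptions: the "swagger.json" path is illustrative only, and per the FlattenOpts doc comment BasePath would normally point at the real location of the root document so relative $refs resolve):

package main

import (
	"log"
	"os"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// Load a (hypothetical) root document from disk.
	raw, err := os.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	var sw spec.Swagger
	if err := sw.UnmarshalJSON(raw); err != nil {
		log.Fatal(err)
	}

	// Minimal flattening: expand parameters/responses/path items, import remote
	// $refs and name arbitrary JSON pointers, leaving complex schemas in place.
	opts := analysis.FlattenOpts{
		Spec:         analysis.New(&sw),
		BasePath:     "swagger.json", // location used to resolve relative remote $refs
		Minimal:      true,
		RemoveUnused: false,
	}
	if err := analysis.Flatten(opts); err != nil {
		log.Fatal(err)
	}

	// sw has been rewritten in place: all $refs now point to #/definitions entries.
	_ = sw
}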
+ +package debug + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + output = os.Stdout +) + +// GetLogger provides a prefix debug logger +func GetLogger(prefix string, debug bool) func(string, ...interface{}) { + if debug { + logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) + + return func(msg string, args ...interface{}) { + _, file1, pos1, _ := runtime.Caller(1) + logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } + } + + return func(msg string, args ...interface{}) {} +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go new file mode 100644 index 0000000000..8c9df0580d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -0,0 +1,87 @@ +package normalize + +import ( + "net/url" + "path" + "path/filepath" + "strings" + + "github.com/go-openapi/spec" +) + +// RebaseRef rebases a remote ref relative to a base ref. +// +// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func RebaseRef(baseRef string, ref string) string { + baseRef, _ = url.PathUnescape(baseRef) + ref, _ = url.PathUnescape(ref) + + if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { + return ref + } + + parts := strings.Split(ref, "#") + + baseParts := strings.Split(baseRef, "#") + baseURL, _ := url.Parse(baseParts[0]) + if strings.HasPrefix(ref, "#") { + if baseURL.Host == "" { + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + refURL, _ := url.Parse(parts[0]) + if refURL.Host != "" || filepath.IsAbs(parts[0]) { + // not rebasing an absolute path + return ref + } + + // there is a relative path + var basePath string + if baseURL.Host != "" { + // when there is a host, standard URI rules apply (with "/") + baseURL.Path = path.Dir(baseURL.Path) + baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) + + return baseURL.String() + } + + // this is a local relative path + // basePart[0] and parts[0] are local filesystem directories/files + basePath = filepath.Dir(baseParts[0]) + relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) + if len(parts) > 1 { + return strings.Join([]string{relPath, parts[1]}, "#") + } + + return relPath +} + +// Path renders absolute path on remote file refs +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func Path(ref spec.Ref, basePath string) string { + uri, _ := url.PathUnescape(ref.String()) + if ref.HasFragmentOnly || filepath.IsAbs(uri) { + return uri + } + + refURL, _ := url.Parse(uri) + if refURL.Host != "" { + return uri + } + + parts := strings.Split(uri, "#") + // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage + parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) + + return strings.Join(parts, "#") +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go new 
file mode 100644 index 0000000000..7f3a2b8717 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go @@ -0,0 +1,90 @@ +package operations + +import ( + "path" + "sort" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// AllOpRefsByRef returns an index of sortable operations +func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { + return OpRefsByRef(GatherOperations(specDoc, operationIDs)) +} + +// OpRefsByRef indexes a map of sortable operations +func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { + result := make(map[string]OpRef, len(oprefs)) + for _, v := range oprefs { + result[v.Ref.String()] = v + } + + return result +} + +// OpRef is an indexable, sortable operation +type OpRef struct { + Method string + Path string + Key string + ID string + Op *spec.Operation + Ref spec.Ref +} + +// OpRefs is a sortable collection of operations +type OpRefs []OpRef + +func (o OpRefs) Len() int { return len(o) } +func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +// Provider knows how to collect operations from a spec +type Provider interface { + Operations() map[string]map[string]*spec.Operation +} + +// GatherOperations builds a map of sorted operations from a spec +func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { + var oprefs OpRefs + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, OpRef{ + Key: swag.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]OpRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + + if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + + return operations +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go new file mode 100644 index 0000000000..26c2a05a31 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -0,0 +1,434 @@ +package replace + +import ( + "fmt" + "net/url" + "os" + "path" + "strconv" + + "github.com/go-openapi/analysis/internal/debug" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") + +// RewriteSchemaToRef replaces a schema with a Ref +func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { + debugLog("rewriting schema to ref for %s with %s", key, ref.String()) + _, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + return rewriteParentRef(sp, key, ref) + + case spec.Schema: + return rewriteParentRef(sp, key, ref) + + case *spec.SchemaOrArray: + if refable.Schema != nil { + 
refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(sp, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case spec.Response: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case *spec.Response: + container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + resp := container.StatusCodeResponses[statusCode] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]spec.Response: + resp := container[entry] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = resp + + case spec.Parameter: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case map[string]spec.Parameter: + param := container[entry] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = param + + case []spec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + param := container[idx] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[idx] = param + + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + + return nil +} + +// getPointerFromKey retrieves the content of the JSON pointer "key" +func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + if key == "#/" { + return "", sp, nil + } + // unescape chars in key, e.g. 
"{}" from path params + pth, _ := url.PathUnescape(key[1:]) + ptr, err := jsonpointer.New(pth) + if err != nil { + return "", nil, err + } + + value, _, err := ptr.Get(sp) + if err != nil { + debugLog("error when getting key: %s with path: %s", key, pth) + + return "", nil, err + } + + return pth, value, nil +} + +// getParentFromKey retrieves the container of the JSON pointer "key" +func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := url.PathUnescape(key[1:]) + + parent, entry := path.Dir(pth), path.Base(pth) + debugLog("getting schema holder at: %s, with entry: %s", parent, entry) + + pptr, err := jsonpointer.New(parent) + if err != nil { + return "", "", nil, err + } + pvalue, _, err := pptr.Get(sp) + if err != nil { + return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) + } + + return parent, entry, pvalue, nil +} + +// UpdateRef replaces a ref by another one +func UpdateRef(sp interface{}, key string, ref spec.Ref) error { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + debugLog("updating ref for %s with %s", key, ref.String()) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + refable.Ref = ref + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case spec.Schema: + debugLog("rewriting holder for %T", refable) + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return err + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled container type at %s: %T", key, value) + } + + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema) +func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { + debugLog("updating ref for %s with schema", key) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + *refable = *sch + case spec.Schema: + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return err + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = *sch + + case map[string]spec.Schema: + container[entry] = *sch + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container[idx] = *sch + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container.Schemas[idx] = *sch + + case spec.SchemaProperties: + container[entry] = *sch + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) + } + case *spec.SchemaOrArray: + *refable.Schema = *sch + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + case *spec.SchemaOrBool: + *refable.Schema = *sch + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// DeepestRefResult holds the results from DeepestRef analysis +type DeepestRefResult struct { + Ref spec.Ref + Schema *spec.Schema + Warnings []string +} + +// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded +// +// NOTE: all external $ref's are assumed to be already expanded at this stage. +func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { + if !ref.HasFragmentOnly { + // we found an external $ref, which is odd at this stage: + // do nothing on external $refs + return &DeepestRefResult{Ref: ref}, nil + } + + currentRef := ref + visited := make(map[string]bool, 64) + warnings := make([]string, 0, 2) + +DOWNREF: + for currentRef.String() != "" { + if path.Dir(currentRef.String()) == definitionsPath { + // this is a top-level definition: stop here and return this ref + return &DeepestRefResult{Ref: currentRef}, nil + } + + if _, beenThere := visited[currentRef.String()]; beenThere { + return nil, + fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) + } + + visited[currentRef.String()] = true + value, _, err := currentRef.GetPointer().Get(sp) + if err != nil { + return nil, err + } + + switch refable := value.(type) { + case *spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case *spec.SchemaOrArray: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case *spec.SchemaOrBool: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case spec.Response: + // a pointer points to a schema initially marshalled in responses section... 
+ // Attempt to convert this to a schema. If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + + err := asSchema.UnmarshalJSON(asJSON) + if err != nil { + return nil, + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T", + currentRef.String(), value) + } + warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + case spec.Parameter: + // a pointer points to a schema initially marshalled in parameters section... + // Attempt to convert this to a schema. If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + if err := asSchema.UnmarshalJSON(asJSON); err != nil { + return nil, + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T", + currentRef.String(), value) + } + + warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + default: + return nil, + fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", + currentRef.String(), value) + } + } + + // assess what schema we're ending with + sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts) + if erv != nil { + return nil, erv + } + + if sch == nil { + return nil, fmt.Errorf("no schema found at %s", currentRef.String()) + } + + return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go new file mode 100644 index 0000000000..4590236e68 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go @@ -0,0 +1,29 @@ +// Package schutils provides tools to save or clone a schema +// when flattening a spec.
+package schutils + +import ( + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// Save registers a schema as an entry in spec #/definitions +func Save(sp *spec.Swagger, name string, schema *spec.Schema) { + if schema == nil { + return + } + + if sp.Definitions == nil { + sp.Definitions = make(map[string]spec.Schema, 150) + } + + sp.Definitions[name] = *schema +} + +// Clone deep-clones a schema +func Clone(schema *spec.Schema) *spec.Schema { + var sch spec.Schema + _ = swag.FromDynamicJSON(schema, &sch) + + return &sch +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go new file mode 100644 index 0000000000..18e552eadc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -0,0 +1,201 @@ +package sortref + +import ( + "net/http" + "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const ( + paths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" +) + +var ( + ignoredKeys map[string]struct{} + validMethods map[string]struct{} +) + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } + + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +// Key represents a key item constructed from /-separated segments +type Key struct { + Segments int + Key string +} + +// Keys is a sortable collection of Keys +type Keys []Key + +func (k Keys) Len() int { return len(k) } +func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k Keys) Less(i, j int) bool { + return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) +} + +// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable. +func KeyParts(key string) SplitKey { + var res []string + for _, part := range strings.Split(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + + return res +} + +// SplitKey holds the parts of a /-separated key, so that their location may be determined. +type SplitKey []string + +// IsDefinition is true when the split key is in the #/definitions section of a spec +func (s SplitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +// DefinitionName yields the name of the definition +func (s SplitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + + return s[1] +} + +func (s SplitKey) isKeyName(i int) bool { + if i <= 0 { + return false + } + + count := 0 + for idx := i - 1; idx > 0; idx-- { + if s[idx] != "properties" { + break + } + count++ + } + + return count%2 != 0 +} + +// PartAdder knows how to construct the components of a new name +type PartAdder func(string) []string + +// BuildName builds a name from segments +func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { + for i, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { + segments = append(segments, adder(part)...)
+ } + } + + return strings.Join(segments, " ") +} + +// IsOperation is true when the split key is in the operations section +func (s SplitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +// IsSharedOperationParam is true when the split key is in the parameters section of a path +func (s SplitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +// IsSharedParam is true when the split key is in the #/parameters section of a spec +func (s SplitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +// IsOperationParam is true when the split key is in the parameters section of an operation +func (s SplitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +// IsOperationResponse is true when the split key is in the responses section of an operation +func (s SplitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +// IsSharedResponse is true when the split key is in the #/responses section of a spec +func (s SplitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +// IsDefaultResponse is true when the split key is the default response for an operation +func (s SplitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +// IsStatusCodeResponse is true when the split key is an operation response with a status code +func (s SplitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + + return err == nil + } + + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +// ResponseName yields either the status code or "Default" for a response +func (s SplitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + + return http.StatusText(code) + } + + if s.IsDefaultResponse() { + return "Default" + } + + return "" +} + +// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} +func (s SplitKey) PathItemRef() spec.Ref { + if len(s) < 3 { + return spec.Ref{} + } + + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +// PathRef constructs a $ref object from a split key of the form /paths/{reference} +func (s SplitKey) PathRef() spec.Ref { + if !s.IsOperation() { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go new file mode 100644 index 0000000000..73243df87f --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go @@ -0,0 +1,141 @@ +package sortref + +import ( + "reflect" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/spec" +) + +var depthGroupOrder = []string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", +} + +type mapIterator struct { + len int + mapIter *reflect.MapIter +} + +func (i *mapIterator) Next() bool { + return i.mapIter.Next() +} + +func (i *mapIterator) Len() int { 
+ return i.len +} + +func (i *mapIterator) Key() string { + return i.mapIter.Key().String() +} + +func mustMapIterator(anyMap interface{}) *mapIterator { + val := reflect.ValueOf(anyMap) + + return &mapIterator{mapIter: val.MapRange(), len: val.Len()} +} + +// DepthFirst sorts a map of anything. It groups keys by category +// (shared params, op param, statuscode response, default response, definitions) +// sort groups internally by number of parts in the key and lexical names +// flatten groups into a single list of keys +func DepthFirst(in interface{}) []string { + iterator := mustMapIterator(in) + sorted := make([]string, 0, iterator.Len()) + grouped := make(map[string]Keys, iterator.Len()) + + for iterator.Next() { + k := iterator.Key() + split := KeyParts(k) + var pk string + + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + if split.IsSharedParam() { + pk = "sharedParam" + } + if split.IsSharedResponse() { + pk = "sharedResponse" + } + grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + + return sorted +} + +// topMostRefs is able to sort refs by hierarchical then lexicographic order, +// yielding refs ordered breadth-first. +type topmostRefs []string + +func (k topmostRefs) Len() int { return len(k) } +func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k topmostRefs) Less(i, j int) bool { + li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) + if li == lj { + return k[i] < k[j] + } + + return li < lj +} + +// TopmostFirst sorts references by depth +func TopmostFirst(refs []string) []string { + res := topmostRefs(refs) + sort.Sort(res) + + return res +} + +// RefRevIdx is a reverse index for references +type RefRevIdx struct { + Ref spec.Ref + Keys []string +} + +// ReverseIndex builds a reverse index for references in schemas +func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { + collected := make(map[string]RefRevIdx) + for key, schRef := range schemas { + // normalize paths before sorting, + // so we get together keys that are from the same external file + normalizedPath := normalize.Path(schRef, basePath) + + entry, ok := collected[normalizedPath] + if ok { + entry.Keys = append(entry.Keys, key) + collected[normalizedPath] = entry + + continue + } + + collected[normalizedPath] = RefRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + + return collected +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/mixin.go b/test/tools/vendor/github.com/go-openapi/analysis/mixin.go new file mode 100644 index 0000000000..b253052648 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/mixin.go @@ -0,0 +1,515 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" +) + +// Mixin modifies the primary swagger spec by adding the paths and +// definitions from the mixin specs. Top level parameters and +// responses from the mixins are also carried over. Operation id +// collisions are avoided by appending "Mixin" but only if +// needed. +// +// The following parts of primary are subject to merge, filling empty details +// - Info +// - BasePath +// - Host +// - ExternalDocs +// +// Consider calling FixEmptyResponseDescriptions() on the modified primary +// if you read them from storage and they are valid to start with. +// +// Entries in "paths", "definitions", "parameters" and "responses" are +// added to the primary in the order of the given mixins. If the entry +// already exists in primary it is skipped with a warning message. +// +// The count of skipped entries (from collisions) is returned so any +// deviation from the number expected can flag a warning in your build +// scripts. Carefully review the collisions before accepting them; +// consider renaming things if possible. +// +// No key normalization takes place (paths, type defs, +// etc). Ensure they are canonical if your downstream tools do +// key normalization of any form. +// +// Merging schemes (http, https), and consumers/producers do not account for +// collisions. +func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { + skipped := make([]string, 0, len(mixins)) + opIds := getOpIds(primary) + initPrimary(primary) + + for i, m := range mixins { + skipped = append(skipped, mergeSwaggerProps(primary, m)...) + + skipped = append(skipped, mergeConsumes(primary, m)...) + + skipped = append(skipped, mergeProduces(primary, m)...) + + skipped = append(skipped, mergeTags(primary, m)...) + + skipped = append(skipped, mergeSchemes(primary, m)...) + + skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) + + skipped = append(skipped, mergeSecurityRequirements(primary, m)...) + + skipped = append(skipped, mergeDefinitions(primary, m)...) + + // merging paths requires a map of operationIDs to work with + skipped = append(skipped, mergePaths(primary, m, opIds, i)...) + + skipped = append(skipped, mergeParameters(primary, m)...) + + skipped = append(skipped, mergeResponses(primary, m)...) + } + + return skipped +} + +// getOpIds extracts all the paths..operationIds from the given +// spec and returns them as the keys in a map with 'true' values. 
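The Mixin doc comment above spells out the merge semantics in prose; here is a minimal usage sketch, assuming the github.com/go-openapi/analysis and github.com/go-openapi/spec modules are importable. The spec contents ("primary", "api.example.com", "Pet") are invented for illustration only.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// Primary spec: has Info and Paths, but no Host yet.
	primary := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Swagger: "2.0",
		Info:    &spec.Info{InfoProps: spec.InfoProps{Title: "primary", Version: "1.0.0"}},
		Paths:   &spec.Paths{Paths: map[string]spec.PathItem{}},
	}}

	// Mixin spec: contributes a Host and one definition.
	mixin := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Swagger: "2.0",
		Host:    "api.example.com",
		Definitions: spec.Definitions{
			"Pet": *spec.StringProperty(),
		},
	}}

	// Mixin fills empty top-level details on primary (Host here), merges
	// definitions/paths/parameters/responses, and returns one warning string
	// per entry it skipped because of a collision.
	for _, warn := range analysis.Mixin(primary, mixin) {
		fmt.Print("skipped: " + warn)
	}
	fmt.Println("host is now:", primary.Host) // "api.example.com"
}
```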
+func getOpIds(s *spec.Swagger) map[string]bool { + rv := make(map[string]bool) + if s.Paths == nil { + return rv + } + + for _, v := range s.Paths.Paths { + piops := pathItemOps(v) + + for _, op := range piops { + rv[op.ID] = true + } + } + + return rv +} + +func pathItemOps(p spec.PathItem) []*spec.Operation { + var rv []*spec.Operation + rv = appendOp(rv, p.Get) + rv = appendOp(rv, p.Put) + rv = appendOp(rv, p.Post) + rv = appendOp(rv, p.Delete) + rv = appendOp(rv, p.Head) + rv = appendOp(rv, p.Patch) + + return rv +} + +func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { + if op == nil { + return ops + } + + return append(ops, op) +} + +func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.SecurityDefinitions { + if _, exists := primary.SecurityDefinitions[k]; exists { + warn := fmt.Sprintf( + "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + primary.SecurityDefinitions[k] = v + } + + return +} + +func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Security { + found := false + for _, vv := range primary.Security { + if reflect.DeepEqual(v, vv) { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) + skipped = append(skipped, warn) + + continue + } + primary.Security = append(primary.Security, v) + } + + return +} + +func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Definitions { + // assume name collisions represent IDENTICAL type. careful. + if _, exists := primary.Definitions[k]; exists { + warn := fmt.Sprintf( + "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Definitions[k] = v + } + + return +} + +func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { + if m.Paths != nil { + for k, v := range m.Paths.Paths { + if _, exists := primary.Paths.Paths[k]; exists { + warn := fmt.Sprintf( + "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + // Swagger requires that operationIds be + // unique within a spec. If we find a + // collision we append "Mixin0" to the + // operationId we are adding, where 0 is mixin + // index. We assume that operationIds within + // all the provided specs are already unique. + piops := pathItemOps(v) + for _, piop := range piops { + if opIds[piop.ID] { + piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) + } + opIds[piop.ID] = true + } + primary.Paths.Paths[k] = v + } + } + + return +} + +func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Parameters { + // could try to rename on conflict but would + // have to fix $refs in the mixin.
Complain + // for now + if _, exists := primary.Parameters[k]; exists { + warn := fmt.Sprintf( + "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Parameters[k] = v + } + + return +} + +func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Responses { + // could try to rename on conflict but would + // have to fix $refs in the mixin. Complain + // for now + if _, exists := primary.Responses[k]; exists { + warn := fmt.Sprintf( + "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Responses[k] = v + } + + return skipped +} + +func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Consumes { + found := false + for _, vv := range primary.Consumes { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Consumes = append(primary.Consumes, v) + } + + return []string{} +} + +func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Produces { + found := false + for _, vv := range primary.Produces { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Produces = append(primary.Produces, v) + } + + return []string{} +} + +func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Tags { + found := false + for _, vv := range primary.Tags { + if v.Name == vv.Name { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", + v.Name, + ) + skipped = append(skipped, warn) + + continue + } + + primary.Tags = append(primary.Tags, v) + } + + return +} + +func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Schemes { + found := false + for _, vv := range primary.Schemes { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Schemes = append(primary.Schemes, v) + } + + return []string{} +} + +func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { + var skipped, skippedInfo, skippedDocs []string + + primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) + + // merging details in swagger top properties + if primary.Host == "" { + primary.Host = m.Host + } + + if primary.BasePath == "" { + primary.BasePath = m.BasePath + } + + if primary.Info == nil { + primary.Info = m.Info + } else if m.Info != nil { + skippedInfo = mergeInfo(primary.Info, m.Info) + skipped = append(skipped, skippedInfo...) + } + + if primary.ExternalDocs == nil { + primary.ExternalDocs = m.ExternalDocs + } else if m != nil { + skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) + skipped = append(skipped, skippedDocs...) 
+ } + + return skipped +} + +// nolint: unparam +func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.URL == "" { + primary.URL = m.URL + } + + return nil +} + +func mergeInfo(primary *spec.Info, m *spec.Info) []string { + var sk, skipped []string + + primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions) + skipped = append(skipped, sk...) + + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.Title == "" { + primary.Description = m.Description + } + + if primary.TermsOfService == "" { + primary.TermsOfService = m.TermsOfService + } + + if primary.Version == "" { + primary.Version = m.Version + } + + if primary.Contact == nil { + primary.Contact = m.Contact + } else if m.Contact != nil { + var csk []string + primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions) + skipped = append(skipped, csk...) + + if primary.Contact.Name == "" { + primary.Contact.Name = m.Contact.Name + } + + if primary.Contact.URL == "" { + primary.Contact.URL = m.Contact.URL + } + + if primary.Contact.Email == "" { + primary.Contact.Email = m.Contact.Email + } + } + + if primary.License == nil { + primary.License = m.License + } else if m.License != nil { + var lsk []string + primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions) + skipped = append(skipped, lsk...) + + if primary.License.Name == "" { + primary.License.Name = m.License.Name + } + + if primary.License.URL == "" { + primary.License.URL = m.License.URL + } + } + + return skipped +} + +func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { + if primary == nil { + result = m + + return + } + + if m == nil { + result = primary + + return + } + + result = primary + for k, v := range m { + if _, found := primary[k]; found { + skipped = append(skipped, k) + + continue + } + + primary[k] = v + } + + return +} + +func initPrimary(primary *spec.Swagger) { + if primary.SecurityDefinitions == nil { + primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) + } + + if primary.Security == nil { + primary.Security = make([]map[string][]string, 0, 10) + } + + if primary.Produces == nil { + primary.Produces = make([]string, 0, 10) + } + + if primary.Consumes == nil { + primary.Consumes = make([]string, 0, 10) + } + + if primary.Tags == nil { + primary.Tags = make([]spec.Tag, 0, 10) + } + + if primary.Schemes == nil { + primary.Schemes = make([]string, 0, 10) + } + + if primary.Paths == nil { + primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} + } + + if primary.Paths.Paths == nil { + primary.Paths.Paths = make(map[string]spec.PathItem) + } + + if primary.Definitions == nil { + primary.Definitions = make(spec.Definitions) + } + + if primary.Parameters == nil { + primary.Parameters = make(map[string]spec.Parameter) + } + + if primary.Responses == nil { + primary.Responses = make(map[string]spec.Response) + } +} diff --git a/test/tools/vendor/github.com/go-openapi/analysis/schema.go b/test/tools/vendor/github.com/go-openapi/analysis/schema.go new file mode 100644 index 0000000000..fc055095cb --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/analysis/schema.go @@ -0,0 +1,256 @@ +package analysis + +import ( + "fmt" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// SchemaOpts 
configures the schema analyzer +type SchemaOpts struct { + Schema *spec.Schema + Root interface{} + BasePath string + _ struct{} +} + +// Schema analysis, will classify the schema according to known +// patterns. +func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + if opts.Schema == nil { + return nil, fmt.Errorf("no schema to analyze") + } + + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + a.inferTuple() + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root interface{} + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// Inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + sch := new(spec.Schema) + sch.Ref = a.schema.Ref + err := spec.ExpandSchema(sch, a.root, nil) + if err != nil { + return err + } + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + // NOTE(fredbi): currently the only cause for errors is + // unresolved ref. Since spec.ExpandSchema() expands the + // schema recursively, there is no chance to get there, + // until we add more causes for error in this schema analysis. 
+ return err + } + a.inherits(rsch) + } + + return nil +} + +func (a *AnalyzedSchema) inferSimpleSchema() { + a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap +} + +func (a *AnalyzedSchema) inferKnownType() { + tpe := a.schema.Type + format := a.schema.Format + a.IsKnownType = tpe.Contains("boolean") || + tpe.Contains("integer") || + tpe.Contains("number") || + tpe.Contains("string") || + (format != "" && strfmt.Default.ContainsName(format)) || + (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) +} + +func (a *AnalyzedSchema) inferMap() error { + if !a.isObjectType() { + return nil + } + + hasExtra := a.hasProps || a.hasAllOf + a.IsMap = a.hasAdditionalProps && !hasExtra + a.IsExtendedObject = a.hasAdditionalProps && hasExtra + + if !a.IsMap { + return nil + } + + // maps + if a.schema.AdditionalProperties.Schema != nil { + msch, err := Schema(SchemaOpts{ + Schema: a.schema.AdditionalProperties.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + a.IsSimpleMap = msch.IsSimpleSchema + } else if a.schema.AdditionalProperties.Allows { + a.IsSimpleMap = true + } + + return nil +} + +func (a *AnalyzedSchema) inferArray() error { + // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple + // (yes, even if the Items array contains only one element). + // arrays in JSON schema may be unrestricted (i.e no Items specified). + // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. + // + // NOTE: the spec package misses the distinction between: + // items: [] and items: {}, so we consider both arrays here. + a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) + if a.IsArray && a.hasItems { + if a.schema.Items.Schema != nil { + itsch, err := Schema(SchemaOpts{ + Schema: a.schema.Items.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + + a.IsSimpleArray = itsch.IsSimpleSchema + } + } + + if a.IsArray && !a.hasItems { + a.IsSimpleArray = true + } + + return nil +} + +func (a *AnalyzedSchema) inferTuple() { + tuple := a.hasItems && a.schema.Items.Schemas != nil + a.IsTuple = tuple && !a.hasAdditionalItems + a.IsTupleWithExtra = tuple && a.hasAdditionalItems +} + +func (a *AnalyzedSchema) inferBaseType() { + if a.isObjectType() { + a.IsBaseType = a.schema.Discriminator != "" + } +} + +func (a *AnalyzedSchema) inferEnum() { + a.IsEnum = len(a.schema.Enum) > 0 +} + +func (a *AnalyzedSchema) initializeFlags() { + a.hasProps = len(a.schema.Properties) > 0 + a.hasAllOf = len(a.schema.AllOf) > 0 + a.hasRef = a.schema.Ref.String() != "" + + a.hasItems = a.schema.Items != nil && + (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) + + a.hasAdditionalProps = a.schema.AdditionalProperties != nil && + (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) + + a.hasAdditionalItems = a.schema.AdditionalItems != nil && + (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) +} + +func (a *AnalyzedSchema) isObjectType() bool { + return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) +} + +func (a *AnalyzedSchema) isArrayType() bool { + return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) +} + +// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
+// +// Complex means the schema is any of: +// - a simple type (primitive) +// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) +// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will +// generate a definition) +func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { + return !a.IsSimpleSchema && !a.IsArray && !a.IsMap +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/.gitattributes b/test/tools/vendor/github.com/go-openapi/errors/.gitattributes new file mode 100644 index 0000000000..a0717e4b3b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/test/tools/vendor/github.com/go-openapi/errors/.gitignore b/test/tools/vendor/github.com/go-openapi/errors/.gitignore new file mode 100644 index 0000000000..dd91ed6a04 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml b/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml new file mode 100644 index 0000000000..4e1fc0c7d4 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml @@ -0,0 +1,48 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - paralleltest + - tparallel + - cyclop + - errname + - varnamelen + - exhaustruct + - maintidx diff --git a/test/tools/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/test/tools/vendor/github.com/go-openapi/errors/LICENSE similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/LICENSE rename to test/tools/vendor/github.com/go-openapi/errors/LICENSE diff --git a/test/tools/vendor/github.com/go-openapi/errors/README.md b/test/tools/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 0000000000..4aac049e60 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,11 @@ +# OpenAPI errors + +[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) +[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/test/tools/vendor/github.com/go-openapi/errors/api.go b/test/tools/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 0000000000..c13f3435fa --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,182 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
+var DefaultHTTPCode = http.StatusUnprocessableEntity + +// Error represents a error interface all swagger framework errors implement +type Error interface { + error + Code() int32 +} + +type apiError struct { + code int32 + message string +} + +func (a *apiError) Error() string { + return a.message +} + +func (a *apiError) Code() int32 { + return a.code +} + +// MarshalJSON implements the JSON encoding interface +func (a apiError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": a.code, + "message": a.message, + }) +} + +// New creates a new API error with a code and a message +func New(code int32, message string, args ...interface{}) Error { + if len(args) > 0 { + return &apiError{code, fmt.Sprintf(message, args...)} + } + return &apiError{code, message} +} + +// NotFound creates a new not found error +func NotFound(message string, args ...interface{}) Error { + if message == "" { + message = "Not found" + } + return New(http.StatusNotFound, fmt.Sprintf(message, args...)) +} + +// NotImplemented creates a new not implemented error +func NotImplemented(message string) Error { + return New(http.StatusNotImplemented, message) +} + +// MethodNotAllowedError represents an error for when the path matches but the method doesn't +type MethodNotAllowedError struct { + code int32 + Allowed []string + message string +} + +func (m *MethodNotAllowedError) Error() string { + return m.message +} + +// Code the error code +func (m *MethodNotAllowedError) Code() int32 { + return m.code +} + +// MarshalJSON implements the JSON encoding interface +func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": m.code, + "message": m.message, + "allowed": m.Allowed, + }) +} + +func errorAsJSON(err Error) []byte { + //nolint:errchkjson + b, _ := json.Marshal(struct { + Code int32 `json:"code"` + Message string `json:"message"` + }{err.Code(), err.Error()}) + return b +} + +func flattenComposite(errs *CompositeError) *CompositeError { + var res []error + for _, er := range errs.Errors { + switch e := er.(type) { + case *CompositeError: + if e != nil && len(e.Errors) > 0 { + flat := flattenComposite(e) + if len(flat.Errors) > 0 { + res = append(res, flat.Errors...) + } + } + default: + if e != nil { + res = append(res, e) + } + } + } + return CompositeValidationError(res...) 
+} + +// MethodNotAllowed creates a new method not allowed error +func MethodNotAllowed(requested string, allow []string) Error { + msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) + return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} +} + +// ServeError the error handler interface implementation +func ServeError(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Set("Content-Type", "application/json") + switch e := err.(type) { + case *CompositeError: + er := flattenComposite(e) + // strips composite errors to first element only + if len(er.Errors) > 0 { + ServeError(rw, r, er.Errors[0]) + } else { + // guard against empty CompositeError (invalid construct) + ServeError(rw, r, nil) + } + case *MethodNotAllowedError: + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case Error: + value := reflect.ValueOf(e) + if value.Kind() == reflect.Ptr && value.IsNil() { + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + return + } + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case nil: + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + default: + rw.WriteHeader(http.StatusInternalServerError) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) + } + } +} + +func asHTTPCode(input int) int { + if input >= 600 { + return DefaultHTTPCode + } + return input +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/auth.go b/test/tools/vendor/github.com/go-openapi/errors/auth.go new file mode 100644 index 0000000000..0545b501bd --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/auth.go @@ -0,0 +1,22 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import "net/http" + +// Unauthenticated returns an unauthenticated error +func Unauthenticated(scheme string) Error { + return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/doc.go b/test/tools/vendor/github.com/go-openapi/errors/doc.go new file mode 100644 index 0000000000..af01190ce6 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/doc.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the errors types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/test/tools/vendor/github.com/go-openapi/errors/headers.go b/test/tools/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 0000000000..dfebe8f95f --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e Validation) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "values": e.Values, + }) +} + +// ValidateName sets the name for a validation or updates it for a nested property +func (e *Validation) ValidateName(name string) *Validation { + if name != "" { + if e.Name == "" { + e.Name = name + e.message = name + e.message + } else { + e.Name = name + "." + e.Name + e.message = name + "." 
+ e.message + } + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/middleware.go b/test/tools/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 0000000000..963472d1f3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,50 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "bytes" + "fmt" + "strings" +) + +// APIVerificationFailed is an error that contains all the missing info for a mismatched section +// between the api registrations and the api spec +type APIVerificationFailed struct { + Section string `json:"section,omitempty"` + MissingSpecification []string `json:"missingSpecification,omitempty"` + MissingRegistration []string `json:"missingRegistration,omitempty"` +} + +func (v *APIVerificationFailed) Error() string { + buf := bytes.NewBuffer(nil) + + hasRegMissing := len(v.MissingRegistration) > 0 + hasSpecMissing := len(v.MissingSpecification) > 0 + + if hasRegMissing { + buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) + } + + if hasRegMissing && hasSpecMissing { + buf.WriteString("\n") + } + + if hasSpecMissing { + buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) + } + + return buf.String() +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/parsing.go b/test/tools/vendor/github.com/go-openapi/errors/parsing.go new file mode 100644 index 0000000000..5096e1ea7b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/parsing.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" +) + +// ParseError represents a parsing error +type ParseError struct { + code int32 + Name string + In string + Value string + Reason error + message string +} + +func (e *ParseError) Error() string { + return e.message +} + +// Code returns the http status code for this error +func (e *ParseError) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e ParseError) MarshalJSON() ([]byte, error) { + var reason string + if e.Reason != nil { + reason = e.Reason.Error() + } + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "reason": reason, + }) +} + +const ( + parseErrorTemplContent = `parsing %s %s from %q failed, because %s` + parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` +) + +// NewParseError creates a new parse error +func NewParseError(name, in, value string, reason error) *ParseError { + var msg string + if in == "" { + msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) + } else { + msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) + } + return &ParseError{ + code: 400, + Name: name, + In: in, + Value: value, + Reason: reason, + message: msg, + } +} diff --git a/test/tools/vendor/github.com/go-openapi/errors/schema.go b/test/tools/vendor/github.com/go-openapi/errors/schema.go new file mode 100644 index 0000000000..da5f6c78cb --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/errors/schema.go @@ -0,0 +1,611 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + readOnlyFail = "%s in %s is readOnly" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maxIncFail = "%s in %s should be less than or equal to %v" + maxExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maxItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + readOnlyFailNoIn = "%s is readOnly" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maxIncFailNoIn = "%s should be less than or equal to %v" + maxExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maxItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = 422 + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = 600 + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode + ReadOnlyFailCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error 
+func (c *CompositeError) Code() int32 { + return c.code +} + +func (c *CompositeError) Error() string { + if len(c.Errors) > 0 { + msgs := []string{c.message + ":"} + for _, e := range c.Errors { + msgs = append(msgs, e.Error()) + } + return strings.Join(msgs, "\n") + } + return c.message +} + +// MarshalJSON implements the JSON encoding interface +func (c CompositeError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": c.code, + "message": c.message, + "errors": c.Errors, + }) +} + +// CompositeValidationError an error to wrap a bunch of other errors +func CompositeValidationError(errors ...error) *CompositeError { + return &CompositeError{ + code: CompositeErrorCode, + Errors: append([]error{}, errors...), + message: "validation failure list", + } +} + +// ValidateName recursively sets the name for all validations or updates them for nested properties +func (c *CompositeError) ValidateName(name string) *CompositeError { + for i, e := range c.Errors { + if ve, ok := e.(*Validation); ok { + c.Errors[i] = ve.ValidateName(name) + } else if ce, ok := e.(*CompositeError); ok { + c.Errors[i] = ce.ValidateName(name) + } + } + + return c +} + +// FailedAllPatternProperties an error for when the property doesn't match a pattern +func FailedAllPatternProperties(name, in, key string) *Validation { + msg := fmt.Sprintf(failedAllPatternProps, name, key, in) + if in == "" { + msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) + } + return &Validation{ + code: FailedAllPatternPropsCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// PropertyNotAllowed an error for when the property doesn't match a pattern +func PropertyNotAllowed(name, in, key string) *Validation { + msg := fmt.Sprintf(unallowedProperty, name, key, in) + if in == "" { + msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) + } + return &Validation{ + code: UnallowedPropertyCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// TooFewProperties an error for an object with too few properties +func TooFewProperties(name, in string, n int64) *Validation { + msg := fmt.Sprintf(tooFewProperties, name, in, n) + if in == "" { + msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) + } + return &Validation{ + code: TooFewPropertiesCode, + Name: name, + In: in, + Value: n, + message: msg, + } +} + +// TooManyProperties an error for an object with too many properties +func TooManyProperties(name, in string, n int64) *Validation { + msg := fmt.Sprintf(tooManyProperties, name, in, n) + if in == "" { + msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) + } + return &Validation{ + code: TooManyPropertiesCode, + Name: name, + In: in, + Value: n, + message: msg, + } +} + +// AdditionalItemsNotAllowed an error for invalid additional items +func AdditionalItemsNotAllowed(name, in string) *Validation { + msg := fmt.Sprintf(noAdditionalItems, name, in) + if in == "" { + msg = fmt.Sprintf(noAdditionalItemsNoIn, name) + } + return &Validation{ + code: NoAdditionalItemsCode, + Name: name, + In: in, + message: msg, + } +} + +// InvalidCollectionFormat another flavor of invalid type error +func InvalidCollectionFormat(name, in, format string) *Validation { + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: format, + message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), + } +} + +// InvalidTypeName an error for when the type is invalid +func InvalidTypeName(typeName string) *Validation { + return &Validation{ + 
code: InvalidTypeCode, + Value: typeName, + message: fmt.Sprintf(invalidType, typeName), + } +} + +// InvalidType creates an error for when the type is invalid +func InvalidType(name, in, typeName string, value interface{}) *Validation { + var message string + + if in != "" { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) + default: + message = fmt.Sprintf(typeFail, name, in, typeName) + } + } else { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) + default: + message = fmt.Sprintf(typeFailNoIn, name, typeName) + } + } + + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: value, + message: message, + } + +} + +// DuplicateItems error for when an array contains duplicates +func DuplicateItems(name, in string) *Validation { + msg := fmt.Sprintf(uniqueFail, name, in) + if in == "" { + msg = fmt.Sprintf(uniqueFailNoIn, name) + } + return &Validation{ + code: UniqueFailCode, + Name: name, + In: in, + message: msg, + } +} + +// TooManyItems error for when an array contains too many items +func TooManyItems(name, in string, max int64, value interface{}) *Validation { + msg := fmt.Sprintf(maxItemsFail, name, in, max) + if in == "" { + msg = fmt.Sprintf(maxItemsFailNoIn, name, max) + } + + return &Validation{ + code: MaxItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooFewItems error for when an array contains too few items +func TooFewItems(name, in string, min int64, value interface{}) *Validation { + msg := fmt.Sprintf(minItemsFail, name, in, min) + if in == "" { + msg = fmt.Sprintf(minItemsFailNoIn, name, min) + } + return &Validation{ + code: MinItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ExceedsMaximumInt error for when maximum validation fails +func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximumUint error for when maximum validation fails +func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximum error for when maximum validation fails +func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + 
message: message, + } +} + +// ExceedsMinimumInt error for when minimum validation fails +func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when minimum validation fails +func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when minimum validation fails +func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return &Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func Required(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ReadOnly error for when a value is present in request +func ReadOnly(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(readOnlyFailNoIn, name) + } else { + msg = fmt.Sprintf(readOnlyFail, name, in) + } + return &Validation{ + code: ReadOnlyFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooLong error for when a string is too long +func TooLong(name, in string, max int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, max) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, max) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + 
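For orientation, a minimal downstream usage sketch of the constructors vendored above (the handler path, field names, length limit, and listen address are illustrative and not taken from this diff; the function signatures follow the vendored source):

package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

func main() {
	// Build individual validation failures and group them. Validation
	// codes in the 6xx range are mapped back to DefaultHTTPCode (422)
	// by ServeError via asHTTPCode.
	missing := errors.Required("name", "body", nil)
	tooLong := errors.TooLong("description", "body", 255, "an overly long value")
	failures := errors.CompositeValidationError(missing, tooLong)

	// ServeError flattens the composite error and writes the first
	// remaining failure as JSON with the mapped status code.
	http.HandleFunc("/validate", func(w http.ResponseWriter, r *http.Request) {
		errors.ServeError(w, r, failures)
	})
	_ = http.ListenAndServe(":8080", nil) // illustrative address
}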
+// TooShort error for when a string is too short +func TooShort(name, in string, min int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, min) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, min) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. +func FailedPattern(name, in, pattern string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/test/tools/vendor/github.com/go-openapi/inflect/.hgignore b/test/tools/vendor/github.com/go-openapi/inflect/.hgignore new file mode 100644 index 0000000000..6cc3d7ce11 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/inflect/.hgignore @@ -0,0 +1 @@ +swp$ diff --git a/test/tools/vendor/github.com/go-openapi/inflect/LICENCE b/test/tools/vendor/github.com/go-openapi/inflect/LICENCE new file mode 100644 index 0000000000..8a36b944a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/inflect/LICENCE @@ -0,0 +1,7 @@ +Copyright (c) 2011 Chris Farmiloe + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
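The README added next documents the exported helpers of the inflect package; a short standalone sketch of the package-level functions it lists (illustrative program; the expected outputs follow from the default ruleset defined in inflect.go later in this diff):

package main

import (
	"fmt"

	"github.com/go-openapi/inflect"
)

func main() {
	// The package-level helpers delegate to a shared default ruleset of
	// English pluralization, camel-case and underscore rules.
	fmt.Println(inflect.Pluralize("person"))     // people
	fmt.Println(inflect.Singularize("matrices")) // matrix
	fmt.Println(inflect.Camelize("dino_party"))  // DinoParty
	fmt.Println(inflect.Underscore("BigBen"))    // big_ben
	fmt.Println(inflect.Tableize("SuperPerson")) // super_people
	fmt.Println(inflect.Ordinalize("1031"))      // 1031st
}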
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/README b/test/tools/vendor/github.com/go-openapi/inflect/README new file mode 100644 index 0000000000..014699a222 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/inflect/README @@ -0,0 +1,168 @@ +INSTALLATION + +go get bitbucket.org/pkg/inflect + +PACKAGE + +package inflect + + +FUNCTIONS + +func AddAcronym(word string) + +func AddHuman(suffix, replacement string) + +func AddIrregular(singular, plural string) + +func AddPlural(suffix, replacement string) + +func AddSingular(suffix, replacement string) + +func AddUncountable(word string) + +func Asciify(word string) string + +func Camelize(word string) string + +func CamelizeDownFirst(word string) string + +func Capitalize(word string) string + +func Dasherize(word string) string + +func ForeignKey(word string) string + +func ForeignKeyCondensed(word string) string + +func Humanize(word string) string + +func Ordinalize(word string) string + +func Parameterize(word string) string + +func ParameterizeJoin(word, sep string) string + +func Pluralize(word string) string + +func Singularize(word string) string + +func Tableize(word string) string + +func Titleize(word string) string + +func Typeify(word string) string + +func Uncountables() map[string]bool + +func Underscore(word string) string + + +TYPES + +type Rule struct { + // contains filtered or unexported fields +} +used by rulesets + +type Ruleset struct { + // contains filtered or unexported fields +} +a Ruleset is the config of pluralization rules +you can extend the rules with the Add* methods + +func NewDefaultRuleset() *Ruleset +create a new ruleset and load it with the default +set of common English pluralization rules + +func NewRuleset() *Ruleset +create a blank ruleset. Unless you are going to +build your own rules from scratch you probably +won't need this and can just use the defaultRuleset +via the global inflect.* methods + +func (rs *Ruleset) AddAcronym(word string) +if you use acronym you may need to add them to the ruleset +to prevent Underscored words of things like "HTML" coming out +as "h_t_m_l" + +func (rs *Ruleset) AddHuman(suffix, replacement string) +Human rules are applied by humanize to show more friendly +versions of words + +func (rs *Ruleset) AddIrregular(singular, plural string) +Add any inconsistant pluralizing/sinularizing rules +to the set here. 
+ +func (rs *Ruleset) AddPlural(suffix, replacement string) +add a pluralization rule + +func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) +add a pluralization rule with full string match + +func (rs *Ruleset) AddSingular(suffix, replacement string) +add a singular rule + +func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) +same as AddSingular but you can set `exact` to force +a full string match + +func (rs *Ruleset) AddUncountable(word string) +add a word to this ruleset that has the same singular and plural form +for example: "rice" + +func (rs *Ruleset) Asciify(word string) string +transforms latin characters like é -> e + +func (rs *Ruleset) Camelize(word string) string +"dino_party" -> "DinoParty" + +func (rs *Ruleset) CamelizeDownFirst(word string) string +same as Camelcase but with first letter downcased + +func (rs *Ruleset) Capitalize(word string) string +uppercase first character + +func (rs *Ruleset) Dasherize(word string) string +"SomeText" -> "some-text" + +func (rs *Ruleset) ForeignKey(word string) string +an underscored foreign key name "Person" -> "person_id" + +func (rs *Ruleset) ForeignKeyCondensed(word string) string +a foreign key (with an underscore) "Person" -> "personid" + +func (rs *Ruleset) Humanize(word string) string +First letter of sentance captitilized +Uses custom friendly replacements via AddHuman() + +func (rs *Ruleset) Ordinalize(str string) string +"1031" -> "1031st" + +func (rs *Ruleset) Parameterize(word string) string +param safe dasherized names like "my-param" + +func (rs *Ruleset) ParameterizeJoin(word, sep string) string +param safe dasherized names with custom seperator + +func (rs *Ruleset) Pluralize(word string) string +returns the plural form of a singular word + +func (rs *Ruleset) Singularize(word string) string +returns the singular form of a plural word + +func (rs *Ruleset) Tableize(word string) string +Rails style pluralized table names: "SuperPerson" -> "super_people" + +func (rs *Ruleset) Titleize(word string) string +Captitilize every word in sentance "hello there" -> "Hello There" + +func (rs *Ruleset) Typeify(word string) string +"something_like_this" -> "SomethingLikeThis" + +func (rs *Ruleset) Uncountables() map[string]bool + +func (rs *Ruleset) Underscore(word string) string +lowercase underscore version "BigBen" -> "big_ben" + + diff --git a/test/tools/vendor/github.com/go-openapi/inflect/inflect.go b/test/tools/vendor/github.com/go-openapi/inflect/inflect.go new file mode 100644 index 0000000000..3008844caf --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/inflect/inflect.go @@ -0,0 +1,713 @@ +package inflect + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// used by rulesets +type Rule struct { + suffix string + replacement string + exact bool +} + +// a Ruleset is the config of pluralization rules +// you can extend the rules with the Add* methods +type Ruleset struct { + uncountables map[string]bool + plurals []*Rule + singulars []*Rule + humans []*Rule + acronyms []*Rule + acronymMatcher *regexp.Regexp +} + +// create a blank ruleset. 
Unless you are going to +// build your own rules from scratch you probably +// won't need this and can just use the defaultRuleset +// via the global inflect.* methods +func NewRuleset() *Ruleset { + rs := new(Ruleset) + rs.uncountables = make(map[string]bool) + rs.plurals = make([]*Rule, 0) + rs.singulars = make([]*Rule, 0) + rs.humans = make([]*Rule, 0) + rs.acronyms = make([]*Rule, 0) + return rs +} + +// create a new ruleset and load it with the default +// set of common English pluralization rules +func NewDefaultRuleset() *Ruleset { + rs := NewRuleset() + rs.AddPlural("s", "s") + rs.AddPlural("testis", "testes") + rs.AddPlural("axis", "axes") + rs.AddPlural("octopus", "octopi") + rs.AddPlural("virus", "viri") + rs.AddPlural("octopi", "octopi") + rs.AddPlural("viri", "viri") + rs.AddPlural("alias", "aliases") + rs.AddPlural("status", "statuses") + rs.AddPlural("bus", "buses") + rs.AddPlural("buffalo", "buffaloes") + rs.AddPlural("tomato", "tomatoes") + rs.AddPlural("tum", "ta") + rs.AddPlural("ium", "ia") + rs.AddPlural("ta", "ta") + rs.AddPlural("ia", "ia") + rs.AddPlural("sis", "ses") + rs.AddPlural("lf", "lves") + rs.AddPlural("rf", "rves") + rs.AddPlural("afe", "aves") + rs.AddPlural("bfe", "bves") + rs.AddPlural("cfe", "cves") + rs.AddPlural("dfe", "dves") + rs.AddPlural("efe", "eves") + rs.AddPlural("gfe", "gves") + rs.AddPlural("hfe", "hves") + rs.AddPlural("ife", "ives") + rs.AddPlural("jfe", "jves") + rs.AddPlural("kfe", "kves") + rs.AddPlural("lfe", "lves") + rs.AddPlural("mfe", "mves") + rs.AddPlural("nfe", "nves") + rs.AddPlural("ofe", "oves") + rs.AddPlural("pfe", "pves") + rs.AddPlural("qfe", "qves") + rs.AddPlural("rfe", "rves") + rs.AddPlural("sfe", "sves") + rs.AddPlural("tfe", "tves") + rs.AddPlural("ufe", "uves") + rs.AddPlural("vfe", "vves") + rs.AddPlural("wfe", "wves") + rs.AddPlural("xfe", "xves") + rs.AddPlural("yfe", "yves") + rs.AddPlural("zfe", "zves") + rs.AddPlural("hive", "hives") + rs.AddPlural("quy", "quies") + rs.AddPlural("by", "bies") + rs.AddPlural("cy", "cies") + rs.AddPlural("dy", "dies") + rs.AddPlural("fy", "fies") + rs.AddPlural("gy", "gies") + rs.AddPlural("hy", "hies") + rs.AddPlural("jy", "jies") + rs.AddPlural("ky", "kies") + rs.AddPlural("ly", "lies") + rs.AddPlural("my", "mies") + rs.AddPlural("ny", "nies") + rs.AddPlural("py", "pies") + rs.AddPlural("qy", "qies") + rs.AddPlural("ry", "ries") + rs.AddPlural("sy", "sies") + rs.AddPlural("ty", "ties") + rs.AddPlural("vy", "vies") + rs.AddPlural("wy", "wies") + rs.AddPlural("xy", "xies") + rs.AddPlural("zy", "zies") + rs.AddPlural("x", "xes") + rs.AddPlural("ch", "ches") + rs.AddPlural("ss", "sses") + rs.AddPlural("sh", "shes") + rs.AddPlural("matrix", "matrices") + rs.AddPlural("vertix", "vertices") + rs.AddPlural("indix", "indices") + rs.AddPlural("matrex", "matrices") + rs.AddPlural("vertex", "vertices") + rs.AddPlural("index", "indices") + rs.AddPlural("mouse", "mice") + rs.AddPlural("louse", "lice") + rs.AddPlural("mice", "mice") + rs.AddPlural("lice", "lice") + rs.AddPluralExact("ox", "oxen", true) + rs.AddPluralExact("oxen", "oxen", true) + rs.AddPluralExact("quiz", "quizzes", true) + rs.AddSingular("s", "") + rs.AddSingular("news", "news") + rs.AddSingular("ta", "tum") + rs.AddSingular("ia", "ium") + rs.AddSingular("analyses", "analysis") + rs.AddSingular("bases", "basis") + rs.AddSingular("diagnoses", "diagnosis") + rs.AddSingular("parentheses", "parenthesis") + rs.AddSingular("prognoses", "prognosis") + rs.AddSingular("synopses", "synopsis") + rs.AddSingular("theses", "thesis") + 
rs.AddSingular("analyses", "analysis") + rs.AddSingular("aves", "afe") + rs.AddSingular("bves", "bfe") + rs.AddSingular("cves", "cfe") + rs.AddSingular("dves", "dfe") + rs.AddSingular("eves", "efe") + rs.AddSingular("gves", "gfe") + rs.AddSingular("hves", "hfe") + rs.AddSingular("ives", "ife") + rs.AddSingular("jves", "jfe") + rs.AddSingular("kves", "kfe") + rs.AddSingular("lves", "lfe") + rs.AddSingular("mves", "mfe") + rs.AddSingular("nves", "nfe") + rs.AddSingular("oves", "ofe") + rs.AddSingular("pves", "pfe") + rs.AddSingular("qves", "qfe") + rs.AddSingular("rves", "rfe") + rs.AddSingular("sves", "sfe") + rs.AddSingular("tves", "tfe") + rs.AddSingular("uves", "ufe") + rs.AddSingular("vves", "vfe") + rs.AddSingular("wves", "wfe") + rs.AddSingular("xves", "xfe") + rs.AddSingular("yves", "yfe") + rs.AddSingular("zves", "zfe") + rs.AddSingular("hives", "hive") + rs.AddSingular("tives", "tive") + rs.AddSingular("lves", "lf") + rs.AddSingular("rves", "rf") + rs.AddSingular("quies", "quy") + rs.AddSingular("bies", "by") + rs.AddSingular("cies", "cy") + rs.AddSingular("dies", "dy") + rs.AddSingular("fies", "fy") + rs.AddSingular("gies", "gy") + rs.AddSingular("hies", "hy") + rs.AddSingular("jies", "jy") + rs.AddSingular("kies", "ky") + rs.AddSingular("lies", "ly") + rs.AddSingular("mies", "my") + rs.AddSingular("nies", "ny") + rs.AddSingular("pies", "py") + rs.AddSingular("qies", "qy") + rs.AddSingular("ries", "ry") + rs.AddSingular("sies", "sy") + rs.AddSingular("ties", "ty") + rs.AddSingular("vies", "vy") + rs.AddSingular("wies", "wy") + rs.AddSingular("xies", "xy") + rs.AddSingular("zies", "zy") + rs.AddSingular("series", "series") + rs.AddSingular("movies", "movie") + rs.AddSingular("xes", "x") + rs.AddSingular("ches", "ch") + rs.AddSingular("sses", "ss") + rs.AddSingular("shes", "sh") + rs.AddSingular("mice", "mouse") + rs.AddSingular("lice", "louse") + rs.AddSingular("buses", "bus") + rs.AddSingular("oes", "o") + rs.AddSingular("shoes", "shoe") + rs.AddSingular("crises", "crisis") + rs.AddSingular("axes", "axis") + rs.AddSingular("testes", "testis") + rs.AddSingular("octopi", "octopus") + rs.AddSingular("viri", "virus") + rs.AddSingular("statuses", "status") + rs.AddSingular("aliases", "alias") + rs.AddSingularExact("oxen", "ox", true) + rs.AddSingular("vertices", "vertex") + rs.AddSingular("indices", "index") + rs.AddSingular("matrices", "matrix") + rs.AddSingularExact("quizzes", "quiz", true) + rs.AddSingular("databases", "database") + rs.AddIrregular("person", "people") + rs.AddIrregular("man", "men") + rs.AddIrregular("child", "children") + rs.AddIrregular("sex", "sexes") + rs.AddIrregular("move", "moves") + rs.AddIrregular("zombie", "zombies") + rs.AddUncountable("equipment") + rs.AddUncountable("information") + rs.AddUncountable("rice") + rs.AddUncountable("money") + rs.AddUncountable("species") + rs.AddUncountable("series") + rs.AddUncountable("fish") + rs.AddUncountable("sheep") + rs.AddUncountable("jeans") + rs.AddUncountable("police") + return rs +} + +func (rs *Ruleset) Uncountables() map[string]bool { + return rs.uncountables +} + +// add a pluralization rule +func (rs *Ruleset) AddPlural(suffix, replacement string) { + rs.AddPluralExact(suffix, replacement, false) +} + +// add a pluralization rule with full string match +func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) { + // remove uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + // prepend + 
rs.plurals = append([]*Rule{r}, rs.plurals...) +} + +// add a singular rule +func (rs *Ruleset) AddSingular(suffix, replacement string) { + rs.AddSingularExact(suffix, replacement, false) +} + +// same as AddSingular but you can set `exact` to force +// a full string match +func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) { + // remove from uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + rs.singulars = append([]*Rule{r}, rs.singulars...) +} + +// Human rules are applied by humanize to show more friendly +// versions of words +func (rs *Ruleset) AddHuman(suffix, replacement string) { + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + rs.humans = append([]*Rule{r}, rs.humans...) +} + +// Add any inconsistant pluralizing/sinularizing rules +// to the set here. +func (rs *Ruleset) AddIrregular(singular, plural string) { + delete(rs.uncountables, singular) + delete(rs.uncountables, plural) + rs.AddPlural(singular, plural) + rs.AddPlural(plural, plural) + rs.AddSingular(plural, singular) +} + +// if you use acronym you may need to add them to the ruleset +// to prevent Underscored words of things like "HTML" coming out +// as "h_t_m_l" +func (rs *Ruleset) AddAcronym(word string) { + r := new(Rule) + r.suffix = word + r.replacement = rs.Titleize(strings.ToLower(word)) + rs.acronyms = append(rs.acronyms, r) +} + +// add a word to this ruleset that has the same singular and plural form +// for example: "rice" +func (rs *Ruleset) AddUncountable(word string) { + rs.uncountables[strings.ToLower(word)] = true +} + +func (rs *Ruleset) isUncountable(word string) bool { + // handle multiple words by using the last one + words := strings.Split(word, " ") + if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists { + return true + } + return false +} + +// returns the plural form of a singular word +func (rs *Ruleset) Pluralize(word string) string { + if len(word) == 0 { + return word + } + if rs.isUncountable(word) { + return word + } + for _, rule := range rs.plurals { + if rule.exact { + if word == rule.suffix { + return rule.replacement + } + } else { + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + } + return word + "s" +} + +// returns the singular form of a plural word +func (rs *Ruleset) Singularize(word string) string { + if len(word) == 0 { + return word + } + if rs.isUncountable(word) { + return word + } + for _, rule := range rs.singulars { + if rule.exact { + if word == rule.suffix { + return rule.replacement + } + } else { + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + } + return word +} + +// uppercase first character +func (rs *Ruleset) Capitalize(word string) string { + return strings.ToUpper(word[:1]) + word[1:] +} + +// "dino_party" -> "DinoParty" +func (rs *Ruleset) Camelize(word string) string { + words := splitAtCaseChangeWithTitlecase(word) + return strings.Join(words, "") +} + +// same as Camelcase but with first letter downcased +func (rs *Ruleset) CamelizeDownFirst(word string) string { + word = Camelize(word) + return strings.ToLower(word[:1]) + word[1:] +} + +// Captitilize every word in sentance "hello there" -> "Hello There" +func (rs *Ruleset) Titleize(word string) string { + words := splitAtCaseChangeWithTitlecase(word) + return strings.Join(words, " ") +} + +func (rs *Ruleset) 
safeCaseAcronyms(word string) string { + // convert an acroymn like HTML into Html + for _, rule := range rs.acronyms { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + return word +} + +func (rs *Ruleset) seperatedWords(word, sep string) string { + word = rs.safeCaseAcronyms(word) + words := splitAtCaseChange(word) + return strings.Join(words, sep) +} + +// lowercase underscore version "BigBen" -> "big_ben" +func (rs *Ruleset) Underscore(word string) string { + return rs.seperatedWords(word, "_") +} + +// First letter of sentance captitilized +// Uses custom friendly replacements via AddHuman() +func (rs *Ruleset) Humanize(word string) string { + word = replaceLast(word, "_id", "") // strip foreign key kinds + // replace and strings in humans list + for _, rule := range rs.humans { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + sentance := rs.seperatedWords(word, " ") + return strings.ToUpper(sentance[:1]) + sentance[1:] +} + +// an underscored foreign key name "Person" -> "person_id" +func (rs *Ruleset) ForeignKey(word string) string { + return rs.Underscore(rs.Singularize(word)) + "_id" +} + +// a foreign key (with an underscore) "Person" -> "personid" +func (rs *Ruleset) ForeignKeyCondensed(word string) string { + return rs.Underscore(word) + "id" +} + +// Rails style pluralized table names: "SuperPerson" -> "super_people" +func (rs *Ruleset) Tableize(word string) string { + return rs.Pluralize(rs.Underscore(rs.Typeify(word))) +} + +var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`) + +// param safe dasherized names like "my-param" +func (rs *Ruleset) Parameterize(word string) string { + return ParameterizeJoin(word, "-") +} + +// param safe dasherized names with custom seperator +func (rs *Ruleset) ParameterizeJoin(word, sep string) string { + word = strings.ToLower(word) + word = rs.Asciify(word) + word = notUrlSafe.ReplaceAllString(word, "") + word = strings.Replace(word, " ", sep, -1) + if len(sep) > 0 { + squash, err := regexp.Compile(sep + "+") + if err == nil { + word = squash.ReplaceAllString(word, sep) + } + } + word = strings.Trim(word, sep+" ") + return word +} + +var lookalikes map[string]*regexp.Regexp = map[string]*regexp.Regexp{ + "A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`), + "AE": regexp.MustCompile(`Æ`), + "C": regexp.MustCompile(`Ç`), + "E": regexp.MustCompile(`È|É|Ê|Ë`), + "G": regexp.MustCompile(`Ğ`), + "I": regexp.MustCompile(`Ì|Í|Î|Ï|İ`), + "N": regexp.MustCompile(`Ñ`), + "O": regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`), + "S": regexp.MustCompile(`Ş`), + "U": regexp.MustCompile(`Ù|Ú|Û|Ü`), + "Y": regexp.MustCompile(`Ý`), + "ss": regexp.MustCompile(`ß`), + "a": regexp.MustCompile(`à|á|â|ã|ä|å`), + "ae": regexp.MustCompile(`æ`), + "c": regexp.MustCompile(`ç`), + "e": regexp.MustCompile(`è|é|ê|ë`), + "g": regexp.MustCompile(`ğ`), + "i": regexp.MustCompile(`ì|í|î|ï|ı`), + "n": regexp.MustCompile(`ñ`), + "o": regexp.MustCompile(`ò|ó|ô|õ|ö|ø`), + "s": regexp.MustCompile(`ş`), + "u": regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`), + "y": regexp.MustCompile(`ý|ÿ`), +} + +// transforms latin characters like é -> e +func (rs *Ruleset) Asciify(word string) string { + for repl, regex := range lookalikes { + word = regex.ReplaceAllString(word, repl) + } + return word +} + +var tablePrefix *regexp.Regexp = regexp.MustCompile(`^[^.]*\.`) + +// "something_like_this" -> "SomethingLikeThis" +func (rs *Ruleset) Typeify(word string) string { + word = tablePrefix.ReplaceAllString(word, "") + return rs.Camelize(rs.Singularize(word)) +} + +// 
"SomeText" -> "some-text" +func (rs *Ruleset) Dasherize(word string) string { + return rs.seperatedWords(word, "-") +} + +// "1031" -> "1031st" +func (rs *Ruleset) Ordinalize(str string) string { + number, err := strconv.Atoi(str) + if err != nil { + return str + } + switch abs(number) % 100 { + case 11, 12, 13: + return fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + return fmt.Sprintf("%dst", number) + case 2: + return fmt.Sprintf("%dnd", number) + case 3: + return fmt.Sprintf("%drd", number) + } + } + return fmt.Sprintf("%dth", number) +} + +///////////////////////////////////////// +// the default global ruleset +////////////////////////////////////////// + +var defaultRuleset *Ruleset + +func init() { + defaultRuleset = NewDefaultRuleset() +} + +func Uncountables() map[string]bool { + return defaultRuleset.Uncountables() +} + +func AddPlural(suffix, replacement string) { + defaultRuleset.AddPlural(suffix, replacement) +} + +func AddSingular(suffix, replacement string) { + defaultRuleset.AddSingular(suffix, replacement) +} + +func AddHuman(suffix, replacement string) { + defaultRuleset.AddHuman(suffix, replacement) +} + +func AddIrregular(singular, plural string) { + defaultRuleset.AddIrregular(singular, plural) +} + +func AddAcronym(word string) { + defaultRuleset.AddAcronym(word) +} + +func AddUncountable(word string) { + defaultRuleset.AddUncountable(word) +} + +func Pluralize(word string) string { + return defaultRuleset.Pluralize(word) +} + +func Singularize(word string) string { + return defaultRuleset.Singularize(word) +} + +func Capitalize(word string) string { + return defaultRuleset.Capitalize(word) +} + +func Camelize(word string) string { + return defaultRuleset.Camelize(word) +} + +func CamelizeDownFirst(word string) string { + return defaultRuleset.CamelizeDownFirst(word) +} + +func Titleize(word string) string { + return defaultRuleset.Titleize(word) +} + +func Underscore(word string) string { + return defaultRuleset.Underscore(word) +} + +func Humanize(word string) string { + return defaultRuleset.Humanize(word) +} + +func ForeignKey(word string) string { + return defaultRuleset.ForeignKey(word) +} + +func ForeignKeyCondensed(word string) string { + return defaultRuleset.ForeignKeyCondensed(word) +} + +func Tableize(word string) string { + return defaultRuleset.Tableize(word) +} + +func Parameterize(word string) string { + return defaultRuleset.Parameterize(word) +} + +func ParameterizeJoin(word, sep string) string { + return defaultRuleset.ParameterizeJoin(word, sep) +} + +func Typeify(word string) string { + return defaultRuleset.Typeify(word) +} + +func Dasherize(word string) string { + return defaultRuleset.Dasherize(word) +} + +func Ordinalize(word string) string { + return defaultRuleset.Ordinalize(word) +} + +func Asciify(word string) string { + return defaultRuleset.Asciify(word) +} + +// helper funcs + +func reverse(s string) string { + o := make([]rune, utf8.RuneCountInString(s)) + i := len(o) + for _, c := range s { + i-- + o[i] = c + } + return string(o) +} + +func isSpacerChar(c rune) bool { + switch { + case c == rune("_"[0]): + return true + case c == rune(" "[0]): + return true + case c == rune(":"[0]): + return true + case c == rune("-"[0]): + return true + } + return false +} + +func splitAtCaseChange(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + 
word = make([]rune, 0) + } + } + if !spacer { + word = append(word, unicode.ToLower(c)) + } + } + words = append(words, string(word)) + return words +} + +func splitAtCaseChangeWithTitlecase(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + if len(word) > 0 { + word = append(word, unicode.ToLower(c)) + } else { + word = append(word, unicode.ToUpper(c)) + } + } + } + words = append(words, string(word)) + return words +} + +func replaceLast(s, match, repl string) string { + // reverse strings + srev := reverse(s) + mrev := reverse(match) + rrev := reverse(repl) + // match first and reverse back + return reverse(strings.Replace(srev, mrev, rrev, 1)) +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/test/tools/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/.gitignore b/test/tools/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 0000000000..769c244007 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/k8s.io/kubernetes/LICENSE b/test/tools/vendor/github.com/go-openapi/jsonpointer/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/LICENSE rename to test/tools/vendor/github.com/go-openapi/jsonpointer/LICENSE diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md b/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 0000000000..813788aff1 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go b/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 0000000000..7df9853def --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,390 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. 
+// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() +var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, interface{}) error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// Set uses the pointer to set a value from a JSON document +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { + return document, p.set(document, value, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +// SetForToken gets a value for a json pointer token 1 level deep +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { + return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + + switch kind { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength 
:= rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + switch rValue.Kind() { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + + switch kind { + case 
reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/.gitignore b/test/tools/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 0000000000..769c244007 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml new file mode 100644 index 0000000000..013fc1943a --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -0,0 +1,50 @@ +linters-settings: + govet: + check-shadowing: true + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + paralleltest: + ignore-missing: true +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/LICENSE b/test/tools/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
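The pointer.go vendored above exposes New, Get/Set, and the RFC 6901 token helpers Escape/Unescape. The following standalone sketch is not part of this diff; the document shape and URL values are made up purely to illustrate how a caller resolves and rewrites a value with the vendored package:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// A document as it comes out of encoding/json: maps, slices, scalars.
	doc := map[string]interface{}{
		"servers": []interface{}{
			map[string]interface{}{"url": "https://example.com/api"},
		},
	}

	// "/servers/0/url" walks a map key, a slice index, then another map key.
	ptr, err := jsonpointer.New("/servers/0/url")
	if err != nil {
		panic(err)
	}

	val, kind, err := ptr.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(val, kind) // https://example.com/api string

	// Set rewrites the value addressed by the same pointer; the nested map
	// is shared, so doc observes the change.
	if _, err := ptr.Set(doc, "https://example.org/api"); err != nil {
		panic(err)
	}

	// Escape applies the ~0 / ~1 token encoding defined at the end of pointer.go.
	fmt.Println(jsonpointer.Escape("a/b~c")) // a~1b~0c
}
```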
diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/README.md b/test/tools/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 0000000000..b94753aa52 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Feature complete. Stable API + +## Dependencies +https://github.com/go-openapi/jsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/test/tools/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 0000000000..f0610cf1e5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,69 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. 
+func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/reference.go b/test/tools/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 0000000000..cfdef03e5d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,158 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + internal.NormalizeURL(parsed) + + r.referenceURL = parsed + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/loads/.editorconfig b/test/tools/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/test/tools/vendor/github.com/go-openapi/loads/.gitignore b/test/tools/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 0000000000..e4f15f17bf --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml b/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 0000000000..d48b4a5156 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,44 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - paralleltest diff --git a/test/tools/vendor/github.com/go-openapi/loads/.travis.yml b/test/tools/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 0000000000..cd4a7c331b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,25 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.16.x +- 1.x +install: +- go get gotest.tools/gotestsum +language: go +arch: +- amd64 +- ppc64le +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master +notifications: + slack: + secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/test/tools/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/test/tools/vendor/github.com/go-openapi/loads/LICENSE b/test/tools/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
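For the jsonreference package vendored earlier in this diff (reference.go plus internal/normalize_url.go, which lowercases scheme/host and strips default ports), the sketch below shows the typical resolve-a-relative-$ref flow. It is illustrative only and not part of the diff; the URLs are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// Where the parent document lives, and a relative $ref found inside it.
	parent := jsonreference.MustCreateRef("https://example.com/specs/api.json")
	child := jsonreference.MustCreateRef("definitions.json#/definitions/Error")

	fmt.Println(parent.IsCanonical())               // true: full http(s) URL
	fmt.Println(child.GetPointer().DecodedTokens()) // [definitions Error]

	// Inherits resolves the child against the parent, much like resolving a
	// relative link against a base URL.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // https://example.com/specs/definitions.json#/definitions/Error
}
```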
diff --git a/test/tools/vendor/github.com/go-openapi/loads/README.md b/test/tools/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 0000000000..df1f626462 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,6 @@ +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test") + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/test/tools/vendor/github.com/go-openapi/loads/doc.go b/test/tools/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 0000000000..3046da4cef --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package loads provides document loading methods for swagger (OAI) specifications. + +It is used by other go-openapi packages to load and run analysis on local or remote spec documents. + +*/ +package loads diff --git a/test/tools/vendor/github.com/go-openapi/loads/fmts/yaml.go b/test/tools/vendor/github.com/go-openapi/loads/fmts/yaml.go new file mode 100644 index 0000000000..1cef2ac22b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/fmts/yaml.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fmts + +import "github.com/go-openapi/swag" + +var ( + // YAMLMatcher matches yaml + YAMLMatcher = swag.YAMLMatcher + // YAMLToJSON converts YAML unmarshaled data into json compatible data + YAMLToJSON = swag.YAMLToJSON + // BytesToYAMLDoc converts raw bytes to a map[string]interface{} + BytesToYAMLDoc = swag.BytesToYAMLDoc + // YAMLDoc loads a yaml document from either http or a file and converts it to json + YAMLDoc = swag.YAMLDoc + // YAMLData loads a yaml document from either http or a file + YAMLData = swag.YAMLData +) diff --git a/test/tools/vendor/github.com/go-openapi/loads/loaders.go b/test/tools/vendor/github.com/go-openapi/loads/loaders.go new file mode 100644 index 0000000000..44bd32b5b8 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/loaders.go @@ -0,0 +1,134 @@ +package loads + +import ( + "encoding/json" + "errors" + "net/url" + + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +var ( + // Default chain of loaders, defined at the package level. + // + // By default this matches json and yaml documents. + // + // May be altered with AddLoader(). + loaders *loader +) + +func init() { + jsonLoader := &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: func(pth string) bool { + return true + }, + Fn: JSONDoc, + }, + } + + loaders = jsonLoader.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: swag.YAMLMatcher, + Fn: swag.YAMLDoc, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +// DocLoaderWithMatch describes a loading function for a given extension match. +type DocLoaderWithMatch struct { + Fn DocLoader + Match DocMatcher +} + +// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options +func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { + return DocLoaderWithMatch{ + Fn: fn, + Match: matcher, + } +} + +type loader struct { + DocLoaderWithMatch + Next *loader +} + +// WithHead adds a loader at the head of the current stack +func (l *loader) WithHead(head *loader) *loader { + if head == nil { + return l + } + head.Next = l + return head +} + +// WithNext adds a loader at the trail of the current stack +func (l *loader) WithNext(next *loader) *loader { + l.Next = next + return next +} + +// Load the raw document from path +func (l *loader) Load(path string) (json.RawMessage, error) { + _, erp := url.Parse(path) + if erp != nil { + return nil, erp + } + + var lastErr error = errors.New("no loader matched") // default error if no match was found + for ldr := l; ldr != nil; ldr = ldr.Next { + if ldr.Match != nil && !ldr.Match(path) { + continue + } + + // try then move to next one if there is an error + b, err := ldr.Fn(path) + if err == nil { + return b, nil + } + + lastErr = err + } + + return nil, lastErr +} + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// AddLoader for a document, executed before other previously set loaders. +// +// This sets the configuration at the package level. 
+// +// NOTE: +// * this updates the default loader used by github.com/go-openapi/spec +// * since this sets package level globals, you shouln't call this concurrently +// +func AddLoader(predicate DocMatcher, load DocLoader) { + loaders = loaders.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: predicate, + Fn: load, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} diff --git a/test/tools/vendor/github.com/go-openapi/loads/options.go b/test/tools/vendor/github.com/go-openapi/loads/options.go new file mode 100644 index 0000000000..f8305d5607 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/options.go @@ -0,0 +1,61 @@ +package loads + +type options struct { + loader *loader +} + +func defaultOptions() *options { + return &options{ + loader: loaders, + } +} + +func loaderFromOptions(options []LoaderOption) *loader { + opts := defaultOptions() + for _, apply := range options { + apply(opts) + } + + return opts.loader +} + +// LoaderOption allows to fine-tune the spec loader behavior +type LoaderOption func(*options) + +// WithDocLoader sets a custom loader for loading specs +func WithDocLoader(l DocLoader) LoaderOption { + return func(opt *options) { + if l == nil { + return + } + opt.loader = &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Fn: l, + }, + } + } +} + +// WithDocLoaderMatches sets a chain of custom loaders for loading specs +// for different extension matches. +// +// Loaders are executed in the order of provided DocLoaderWithMatch'es. +func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { + return func(opt *options) { + var final, prev *loader + for _, ldr := range l { + if ldr.Fn == nil { + continue + } + + if prev == nil { + final = &loader{DocLoaderWithMatch: ldr} + prev = final + continue + } + + prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) + } + opt.loader = final + } +} diff --git a/test/tools/vendor/github.com/go-openapi/loads/spec.go b/test/tools/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 0000000000..93c8d4b895 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,266 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
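The loader chain and options above can also be combined per call. A minimal sketch (the path is hypothetical, and the Spec constructor used here appears further down in spec.go) wiring a YAML-aware loader ahead of the plain JSON fallback:

    package main

    import (
        "log"

        "github.com/go-openapi/loads"
        "github.com/go-openapi/swag"
    )

    func main() {
        // Loaders are tried in the order given: the YAML loader for matching
        // extensions first, then plain JSON loading for everything else.
        yamlLoader := loads.NewDocLoaderWithMatch(swag.YAMLDoc, swag.YAMLMatcher)
        jsonLoader := loads.NewDocLoaderWithMatch(loads.JSONDoc, func(string) bool { return true })

        doc, err := loads.Spec("./swagger.yml", loads.WithDocLoaderMatches(yamlLoader, jsonLoader))
        if err != nil {
            log.Fatal(err)
        }
        log.Println("loaded spec version:", doc.Version())

        // AddLoader(matcher, fn) is the package-level alternative; as its
        // comment notes, it mutates globals and should not be called concurrently.
    }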
+ +package loads + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + raw json.RawMessage + pathLoader *loader +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string, options ...LoaderOption) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + return Analyzed(data, "", options...) +} + +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + pathLoader: loaderFromOptions(options), + }, nil +} + +// Spec loads a new spec document from a local or remote path +func Spec(path string, options ...LoaderOption) (*Document, error) { + + ldr := loaderFromOptions(options) + + b, err := ldr.Load(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "", options...) + if err != nil { + return nil, err + } + + if document != nil { + document.specFilePath = path + document.pathLoader = ldr + } + + return document, err +} + +// Analyzed creates a new analyzed spec document for a root json.RawMessage. 
+func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw, err := trimData(data) // trim blanks, then convert yaml docs into json + if err != nil { + return nil, err + } + + swspec := new(spec.Swagger) + if err = json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + pathLoader: loaderFromOptions(options), + } + + return d, nil +} + +func trimData(in json.RawMessage) (json.RawMessage, error) { + trimmed := bytes.TrimSpace(in) + if len(trimmed) == 0 { + return in, nil + } + + if trimmed[0] == '{' || trimmed[0] == '[' { + return trimmed, nil + } + + // assume yaml doc: convert it to json + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + return d, nil +} + +// Expanded expands the ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if expandOptions.PathLoader == nil { + if d.pathLoader != nil { + // use loader from Document options + expandOptions.PathLoader = d.pathLoader.Load + } else { + // use package level loader + expandOptions.PathLoader = loaders.Load + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset to the original spec +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + dd, _ := Analyzed(d.Raw(), d.Version()) + dd.pathLoader = d.pathLoader + return 
dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/.editorconfig b/test/tools/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/test/tools/vendor/github.com/go-openapi/runtime/.gitattributes b/test/tools/vendor/github.com/go-openapi/runtime/.gitattributes new file mode 100644 index 0000000000..d207b1802b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/test/tools/vendor/github.com/go-openapi/runtime/.gitignore b/test/tools/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 0000000000..fea8b84eca --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml b/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml new file mode 100644 index 0000000000..b1aa7928a7 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -0,0 +1,44 @@ +linters-settings: + govet: + # Using err repeatedly considered as shadowing. 
+ check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - noctx + - interfacer + - nilerr diff --git a/test/tools/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. 
The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/test/tools/vendor/github.com/go-openapi/runtime/LICENSE b/test/tools/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/tools/vendor/github.com/go-openapi/runtime/README.md b/test/tools/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 0000000000..5b1ec64945 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,7 @@ +# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) + +# golang Open-API toolkit - runtime + +The runtime component for use in codegeneration or as untyped usage. diff --git a/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go b/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 0000000000..6eb6ceb5c5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,169 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams, +// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, +// and reads from the provided reader +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + + close := defaultCloser + if vals.Close { + if cl, ok := reader.(io.Closer); ok { + close = cl.Close + } + } + //nolint:errcheck // closing a reader wouldn't fail. + defer close() + + if wrtr, ok := data.(io.Writer); ok { + _, err := io.Copy(wrtr, reader) + return err + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + if bu, ok := data.(encoding.BinaryUnmarshaler); ok { + return bu.UnmarshalBinary(b) + } + + if data != nil { + if str, ok := data.(*string); ok { + *str = string(b) + return nil + } + } + + if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + v.SetBytes(b) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams, +// takes a Reader/BinaryMarshaler interface or binary slice, +// and writes to a writer (essentially a pipe) +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + close := defaultCloser + if vals.Close { + if cl, ok := writer.(io.Closer); ok { + close = cl.Close + } + } + //nolint:errcheck // TODO: closing a writer would fail. 
+ defer close() + + if rc, ok := data.(io.ReadCloser); ok { + defer rc.Close() + } + + if rdr, ok := data.(io.Reader); ok { + _, err := io.Copy(writer, rdr) + return err + } + + if bm, ok := data.(encoding.BinaryMarshaler); ok { + bytes, err := bm.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + } + + if data != nil { + if str, ok := data.(string); ok { + _, err := writer.Write([]byte(str)) + return err + } + + if e, ok := data.(error); ok { + _, err := writer.Write([]byte(e.Error())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + _, err := writer.Write(v.Bytes()) + return err + } + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_auth_info.go b/test/tools/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 0000000000..c6c97d9a7c --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go b/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 0000000000..fa21eacf33 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
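Two small usage sketches for the pieces added just above: a ByteStreamConsumer draining a reader into a buffer, and a ClientAuthInfoWriterFunc adapting a plain function into an auth writer (the header name and value are purely illustrative):

    package main

    import (
        "bytes"
        "log"
        "strings"

        "github.com/go-openapi/runtime"
        "github.com/go-openapi/strfmt"
    )

    func main() {
        // ByteStreamConsumer copies the reader into any io.Writer, *[]byte,
        // *string or encoding.BinaryUnmarshaler passed as the data argument.
        consumer := runtime.ByteStreamConsumer()
        buf := new(bytes.Buffer)
        if err := consumer.Consume(strings.NewReader("raw payload"), buf); err != nil {
            log.Fatal(err)
        }
        log.Println(buf.String())

        // ClientAuthInfoWriterFunc turns a closure into a ClientAuthInfoWriter.
        apiKeyAuth := runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
            return req.SetHeaderParam("X-API-Key", "example-key")
        })
        _ = apiKeyAuth
    }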
+ +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_request.go b/test/tools/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 0000000000..d4d2b58f2b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request +type ClientRequest interface { + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = io.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { 
+ return n.name +} + +type TestClientRequest struct { + Headers http.Header + Body interface{} +} + +func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { + if t.Headers == nil { + t.Headers = make(http.Header) + } + t.Headers.Set(name, values[0]) + return nil +} + +func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } + +func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } + +func (t *TestClientRequest) SetBodyParam(body interface{}) error { + t.Body = body + return nil +} + +func (t *TestClientRequest) SetTimeout(time.Duration) error { + return nil +} + +func (t *TestClientRequest) GetQueryParams() url.Values { return nil } + +func (t *TestClientRequest) GetMethod() string { return "" } + +func (t *TestClientRequest) GetPath() string { return "" } + +func (t *TestClientRequest) GetBody() []byte { return nil } + +func (t *TestClientRequest) GetBodyParam() interface{} { + return t.Body +} + +func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { + return nil +} + +func (t *TestClientRequest) GetHeaderParams() http.Header { + return t.Headers +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_response.go b/test/tools/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 0000000000..0d1691149d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/client_response.go @@ -0,0 +1,110 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "fmt" + "io" +) + +// A ClientResponse represents a client response +// This bridges between responses obtained from different transports +type ClientResponse interface { + Code() int + Message() string + GetHeader(string) string + GetHeaders(string) []string + Body() io.ReadCloser +} + +// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) + +// ReadResponse reads the response +func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { + return read(resp, consumer) +} + +// A ClientResponseReader is an interface for things want to read a response. 
+// An application of this is to create structs from response values +type ClientResponseReader interface { + ReadResponse(ClientResponse, Consumer) (interface{}, error) +} + +// NewAPIError creates a new API error +func NewAPIError(opName string, payload interface{}, code int) *APIError { + return &APIError{ + OperationName: opName, + Response: payload, + Code: code, + } +} + +// APIError wraps an error model and captures the status code +type APIError struct { + OperationName string + Response interface{} + Code int +} + +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + err.Error() + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) +} + +func (o *APIError) String() string { + return o.Error() +} + +// IsSuccess returns true when this elapse o k response returns a 2xx status code +func (o *APIError) IsSuccess() bool { + return o.Code/100 == 2 +} + +// IsRedirect returns true when this elapse o k response returns a 3xx status code +func (o *APIError) IsRedirect() bool { + return o.Code/100 == 3 +} + +// IsClientError returns true when this elapse o k response returns a 4xx status code +func (o *APIError) IsClientError() bool { + return o.Code/100 == 4 +} + +// IsServerError returns true when this elapse o k response returns a 5xx status code +func (o *APIError) IsServerError() bool { + return o.Code/100 == 5 +} + +// IsCode returns true when this elapse o k response returns a 4xx status code +func (o *APIError) IsCode(code int) bool { + return o.Code == code +} + +// A ClientResponseStatus is a common interface implemented by all responses on the generated code +// You can use this to treat any client response based on status code +type ClientResponseStatus interface { + IsSuccess() bool + IsRedirect() bool + IsClientError() bool + IsServerError() bool + IsCode(int) bool +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/constants.go b/test/tools/vendor/github.com/go-openapi/runtime/constants.go new file mode 100644 index 0000000000..515969242c --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/constants.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +const ( + // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type + HeaderContentType = "Content-Type" + + // HeaderTransferEncoding represents a http transfer-encoding header. 
+ HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + // HeaderAuthorization the Authorization header + HeaderAuthorization = "Authorization" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/csv.go b/test/tools/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 0000000000..d807bd915b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,77 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding/csv" + "errors" + "io" +) + +// CSVConsumer creates a new CSV consumer +func CSVConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + + csvReader := csv.NewReader(reader) + writer, ok := data.(io.Writer) + if !ok { + return errors.New("data type must be io.Writer") + } + csvWriter := csv.NewWriter(writer) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} + +// CSVProducer creates a new CSV producer +func CSVProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + + dataBytes, ok := data.([]byte) + if !ok { + return errors.New("data type must be byte array") + } + + csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + csvWriter := csv.NewWriter(writer) + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/discard.go b/test/tools/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 0000000000..0d390cfd64 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,9 @@ +package runtime + +import "io" + +// DiscardConsumer does absolutely nothing, it's a black hole. 
+var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. +var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/file.go b/test/tools/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 0000000000..397d8a4593 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,19 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/swag" + +type File = swag.File diff --git a/test/tools/vendor/github.com/go-openapi/runtime/headers.go b/test/tools/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 0000000000..4d111db4fe --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/interfaces.go b/test/tools/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 0000000000..e334128683 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,112 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
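A minimal sketch of the ContentType helper from headers.go above (the header value is illustrative); it returns the media type and charset, falling back to DefaultMime when the header is absent:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/go-openapi/runtime"
    )

    func main() {
        hdr := make(http.Header)
        hdr.Set(runtime.HeaderContentType, "application/json; charset=utf-8")

        mediaType, charset, err := runtime.ContentType(hdr)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(mediaType, charset) // application/json utf-8
    }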
+// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { + Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} + +// ContextValidatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. 
+// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the context validations obtained from the spec +type ContextValidatable interface { + ContextValidate(context.Context, strfmt.Registry) error +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/json.go b/test/tools/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 0000000000..5a690559cc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/logger/logger.go b/test/tools/vendor/github.com/go-openapi/runtime/logger/logger.go new file mode 100644 index 0000000000..6f4debcc14 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/logger/logger.go @@ -0,0 +1,20 @@ +package logger + +import "os" + +type Logger interface { + Printf(format string, args ...interface{}) + Debugf(format string, args ...interface{}) +} + +func DebugEnabled() bool { + d := os.Getenv("SWAGGER_DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + d = os.Getenv("DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + return false +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go b/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go new file mode 100644 index 0000000000..f7e67ebb9e --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go @@ -0,0 +1,22 @@ +package logger + +import ( + "fmt" + "os" +) + +type StandardLogger struct{} + +func (StandardLogger) Printf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) +} + +func (StandardLogger) Debugf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) 
+} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go new file mode 100644 index 0000000000..d21ae4e870 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -0,0 +1,635 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + stdContext "context" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/untyped" + "github.com/go-openapi/runtime/security" +) + +// Debug when true turns on verbose logging +var Debug = logger.DebugEnabled() +var Logger logger.Logger = logger.StandardLogger{} + +func debugLog(format string, args ...interface{}) { + if Debug { + Logger.Printf(format, args...) + } +} + +// A Builder can create middlewares +type Builder func(http.Handler) http.Handler + +// PassthroughBuilder returns the handler, aka the builder identity function +func PassthroughBuilder(handler http.Handler) http.Handler { return handler } + +// RequestBinder is an interface for types to implement +// when they want to be able to bind from a request +type RequestBinder interface { + BindRequest(*http.Request, *MatchedRoute) error +} + +// Responder is an interface for types to implement +// when they want to be considered for writing HTTP responses +type Responder interface { + WriteResponse(http.ResponseWriter, runtime.Producer) +} + +// ResponderFunc wraps a func as a Responder interface +type ResponderFunc func(http.ResponseWriter, runtime.Producer) + +// WriteResponse writes to the response +func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { + fn(rw, pr) +} + +// Context is a type safe wrapper around an untyped request context +// used throughout to store request context with the standard context attached +// to the http.Request +type Context struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + router Router +} + +type routableUntypedAPI struct { + api *untyped.API + hlock *sync.Mutex + handlers map[string]map[string]http.Handler + defaultConsumes string + defaultProduces string +} + +func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { + var handlers map[string]map[string]http.Handler + if spec == nil || api == nil { + return nil + } + analyzer := analysis.New(spec.Spec()) + for method, hls := range analyzer.Operations() { + um := strings.ToUpper(method) + for path, op := range hls { + schemes := analyzer.SecurityRequirementsFor(op) + + if oh, ok := api.OperationHandlerFor(method, path); ok { + if handlers == nil { + handlers = make(map[string]map[string]http.Handler) + } + if b, ok := 
handlers[um]; !ok || b == nil { + handlers[um] = make(map[string]http.Handler) + } + + var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // lookup route info in the context + route, rCtx, _ := context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + // bind and validate the request using reflection + var bound interface{} + var validation error + bound, r, validation = context.BindAndValidate(r, route) + if validation != nil { + context.Respond(w, r, route.Produces, route, validation) + return + } + + // actually handle the request + result, err := oh.Handle(bound) + if err != nil { + // respond with failure + context.Respond(w, r, route.Produces, route, err) + return + } + + // respond with success + context.Respond(w, r, route.Produces, route, result) + }) + + if len(schemes) > 0 { + handler = newSecureAPI(context, handler) + } + handlers[um][path] = handler + } + } + } + + return &routableUntypedAPI{ + api: api, + hlock: new(sync.Mutex), + handlers: handlers, + defaultProduces: api.DefaultProduces, + defaultConsumes: api.DefaultConsumes, + } +} + +func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { + r.hlock.Lock() + paths, ok := r.handlers[strings.ToUpper(method)] + if !ok { + r.hlock.Unlock() + return nil, false + } + handler, ok := paths[path] + r.hlock.Unlock() + return handler, ok +} +func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { + return r.api.ServeError +} +func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + return r.api.ConsumersFor(mediaTypes) +} +func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + return r.api.ProducersFor(mediaTypes) +} +func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + return r.api.AuthenticatorsFor(schemes) +} +func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { + return r.api.Authorizer() +} +func (r *routableUntypedAPI) Formats() strfmt.Registry { + return r.api.Formats() +} + +func (r *routableUntypedAPI) DefaultProduces() string { + return r.defaultProduces +} + +func (r *routableUntypedAPI) DefaultConsumes() string { + return r.defaultConsumes +} + +// NewRoutableContext creates a new context for a routable API +func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + + return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes) +} + +// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too +func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context { + // Either there are no spec doc and analysis, or both of them. 
+ if !((spec == nil && an == nil) || (spec != nil && an != nil)) { + panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them")) + } + + ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} + return ctx +} + +// NewContext creates a new context wrapper +func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{spec: spec, analyzer: an} + ctx.api = newRoutableUntypedAPI(spec, api, ctx) + ctx.router = routes + return ctx +} + +// Serve serves the specified spec with the specified api registrations as a http.Handler +func Serve(spec *loads.Document, api *untyped.API) http.Handler { + return ServeWithBuilder(spec, api, PassthroughBuilder) +} + +// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated +// by the Builder +func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { + context := NewContext(spec, api, nil) + return context.APIHandler(builder) +} + +type contextKey int8 + +const ( + _ contextKey = iota + ctxContentType + ctxResponseFormat + ctxMatchedRoute + ctxBoundParams + ctxSecurityPrincipal + ctxSecurityScopes +) + +// MatchedRouteFrom request context value. +func MatchedRouteFrom(req *http.Request) *MatchedRoute { + mr := req.Context().Value(ctxMatchedRoute) + if mr == nil { + return nil + } + if res, ok := mr.(*MatchedRoute); ok { + return res + } + return nil +} + +// SecurityPrincipalFrom request context value. +func SecurityPrincipalFrom(req *http.Request) interface{} { + return req.Context().Value(ctxSecurityPrincipal) +} + +// SecurityScopesFrom request context value. +func SecurityScopesFrom(req *http.Request) []string { + rs := req.Context().Value(ctxSecurityScopes) + if res, ok := rs.([]string); ok { + return res + } + return nil +} + +type contentTypeValue struct { + MediaType string + Charset string +} + +// BasePath returns the base path for this API +func (c *Context) BasePath() string { + return c.spec.BasePath() +} + +// RequiredProduces returns the accepted content types for responses +func (c *Context) RequiredProduces() []string { + return c.analyzer.RequiredProduces() +} + +// BindValidRequest binds a params object to a request but only when the request is valid +// if the request is not valid an error will be returned +func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { + var res []error + var requestContentType string + + // check and validate content type, select consumer + if runtime.HasBody(request) { + ct, _, err := runtime.ContentType(request.Header) + if err != nil { + res = append(res, err) + } else { + if err := validateContentType(route.Consumes, ct); err != nil { + res = append(res, err) + } + if len(res) == 0 { + cons, ok := route.Consumers[ct] + if !ok { + res = append(res, errors.New(500, "no consumer registered for %s", ct)) + } else { + route.Consumer = cons + requestContentType = ct + } + } + } + } + + // check and validate the response format + if len(res) == 0 { + // if the route does not provide Produces and a default contentType could not be identified + // based on a body, typical for GET and DELETE requests, then default contentType to. 
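+		// (i.e. default the response content type to "*/*", as done just below
+		// when the route declares no Produces and none was derived from the body)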
+ if len(route.Produces) == 0 && requestContentType == "" { + requestContentType = "*/*" + } + + if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { + res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) + } + } + + // now bind the request with the provided binder + // it's assumed the binder will also validate the request and return an error if the + // request is invalid + if binder != nil && len(res) == 0 { + if err := binder.BindRequest(request, route); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContentType gets the parsed value of a content type +// Returns the media type, its charset and a shallow copy of the request +// when its context doesn't contain the content type value, otherwise it returns +// the same request +// Returns the error that runtime.ContentType may retunrs. +func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { + return v.MediaType, v.Charset, request, nil + } + + mt, cs, err := runtime.ContentType(request.Header) + if err != nil { + return "", "", nil, err + } + rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) + return mt, cs, request.WithContext(rCtx), nil +} + +// LookupRoute looks a route up and returns true when it is found +func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { + if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { + return route, ok + } + return nil, false +} + +// RouteInfo tries to match a route for this request +// Returns the matched route, a shallow copy of the request if its context +// contains the matched router, otherwise the same request, and a bool to +// indicate if it the request matches one of the routes, if it doesn't +// then it returns false and nil for the other two return values +func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { + return v, request, ok + } + + if route, ok := c.LookupRoute(request); ok { + rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) + return route, request.WithContext(rCtx), ok + } + + return nil, nil, false +} + +// ResponseFormat negotiates the response content type +// Returns the response format and a shallow copy of the request if its context +// doesn't contain the response format, otherwise the same request +func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { + var rCtx = r.Context() + + if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { + debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) + return v, r + } + + format := NegotiateContentType(r, offers, "") + if format != "" { + debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) + r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) + } + debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) + return format, r +} + +// AllowedMethods gets the allowed methods for the path of this request +func (c *Context) AllowedMethods(request *http.Request) []string { + return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) +} + +// ResetAuth removes the 
current principal from the request context +func (c *Context) ResetAuth(request *http.Request) *http.Request { + rctx := request.Context() + rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) + rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) + return request.WithContext(rctx) +} + +// Authorize authorizes the request +// Returns the principal object and a shallow copy of the request when its +// context doesn't contain the principal, otherwise the same request or an error +// (the last) if one of the authenticators returns one or an Unauthenticated error +func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { + if route == nil || !route.HasAuth() { + return nil, nil, nil + } + + var rCtx = request.Context() + if v := rCtx.Value(ctxSecurityPrincipal); v != nil { + return v, request, nil + } + + applies, usr, err := route.Authenticators.Authenticate(request, route) + if !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil { + if err != nil { + return nil, nil, err + } + return nil, nil, errors.Unauthenticated("invalid credentials") + } + if route.Authorizer != nil { + if err := route.Authorizer.Authorize(request, usr); err != nil { + if _, ok := err.(errors.Error); ok { + return nil, nil, err + } + + return nil, nil, errors.New(http.StatusForbidden, err.Error()) + } + } + + rCtx = request.Context() + + rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) + rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) + return usr, request.WithContext(rCtx), nil +} + +// BindAndValidate binds and validates the request +// Returns the validation map and a shallow copy of the request when its context +// doesn't contain the validation, otherwise it returns the same request or an +// CompositeValidationError error +func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { + debugLog("got cached validation (valid: %t)", len(v.result) == 0) + if len(v.result) > 0 { + return v.bound, request, errors.CompositeValidationError(v.result...) + } + return v.bound, request, nil + } + result := validateRequest(c, request, matched) + rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) + request = request.WithContext(rCtx) + if len(result.result) > 0 { + return result.bound, request, errors.CompositeValidationError(result.result...) 
+ } + debugLog("no validation errors found") + return result.bound, request, nil +} + +// NotFound the default not found responder for when no route has been matched yet +func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { + c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) +} + +// Respond renders the response after doing some content negotiation +func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { + debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) + offers := []string{} + for _, mt := range produces { + if mt != c.api.DefaultProduces() { + offers = append(offers, mt) + } + } + // the default producer is last so more specific producers take precedence + offers = append(offers, c.api.DefaultProduces()) + debugLog("offers: %v", offers) + + var format string + format, r = c.ResponseFormat(r, offers) + rw.Header().Set(runtime.HeaderContentType, format) + + if resp, ok := data.(Responder); ok { + producers := route.Producers + // producers contains keys with normalized format, if a format has MIME type parameter such as `text/plain; charset=utf-8` + // then you must provide `text/plain` to get the correct producer. HOWEVER, format here is not normalized. + prod, ok := producers[normalizeOffer(format)] + if !ok { + prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) + pr, ok := prods[c.api.DefaultProduces()] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + prod = pr + } + resp.WriteResponse(rw, prod) + return + } + + if err, ok := data.(error); ok { + if format == "" { + rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) + } + + if realm := security.FailedBasicAuth(r); realm != "" { + rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) + } + + if route == nil || route.Operation == nil { + c.api.ServeErrorFor("")(rw, r, err) + return + } + c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) + return + } + + if route == nil || route.Operation == nil { + rw.WriteHeader(200) + if r.Method == "HEAD" { + return + } + producers := c.api.ProducersFor(normalizeOffers(offers)) + prod, ok := producers[format] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + if err := prod.Produce(rw, data); err != nil { + panic(err) // let the recovery middleware deal with this + } + return + } + + if _, code, ok := route.Operation.SuccessResponse(); ok { + rw.WriteHeader(code) + if code == 204 || r.Method == "HEAD" { + return + } + + producers := route.Producers + prod, ok := producers[format] + if !ok { + if !ok { + prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) + pr, ok := prods[c.api.DefaultProduces()] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + prod = pr + } + } + if err := prod.Produce(rw, data); err != nil { + panic(err) // let the recovery middleware deal with this + } + return + } + + c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response")) +} + +func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + var title string + sp := c.spec.Spec() + if sp != nil && sp.Info != nil && sp.Info.Title != "" { + title = sp.Info.Title + } + + swaggerUIOpts := 
SwaggerUIOpts{ + BasePath: c.BasePath(), + Title: title, + } + + return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b))) +} + +// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec +func (c *Context) APIHandler(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + var title string + sp := c.spec.Spec() + if sp != nil && sp.Info != nil && sp.Info.Title != "" { + title = sp.Info.Title + } + + redocOpts := RedocOpts{ + BasePath: c.BasePath(), + Title: title, + } + + return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b))) +} + +// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec +func (c *Context) RoutesHandler(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + return NewRouter(c, b(NewOperationExecutor(c))) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE new file mode 100644 index 0000000000..e65039ad84 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Naoya Inada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/README.md new file mode 100644 index 0000000000..30109e17d5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/README.md @@ -0,0 +1,180 @@ +# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco) + +The fast and flexible HTTP request router for [Go](http://golang.org). + +Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter). +However, Denco is optimized and some features added. 
+ +## Features + +* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark)) +* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) +* Small (but enough) URL router API +* HTTP request multiplexer like `http.ServeMux` + +## Installation + + go get -u github.com/go-openapi/runtime/middleware/denco + +## Using as HTTP request multiplexer + +```go +package main + +import ( + "fmt" + "log" + "net/http" + + "github.com/go-openapi/runtime/middleware/denco" +) + +func Index(w http.ResponseWriter, r *http.Request, params denco.Params) { + fmt.Fprintf(w, "Welcome to Denco!\n") +} + +func User(w http.ResponseWriter, r *http.Request, params denco.Params) { + fmt.Fprintf(w, "Hello %s!\n", params.Get("name")) +} + +func main() { + mux := denco.NewMux() + handler, err := mux.Build([]denco.Handler{ + mux.GET("/", Index), + mux.GET("/user/:name", User), + mux.POST("/user/:name", User), + }) + if err != nil { + panic(err) + } + log.Fatal(http.ListenAndServe(":8080", handler)) +} +``` + +## Using as URL router + +```go +package main + +import ( + "fmt" + + "github.com/go-openapi/runtime/middleware/denco" +) + +type route struct { + name string +} + +func main() { + router := denco.New() + router.Build([]denco.Record{ + {"/", &route{"root"}}, + {"/user/:id", &route{"user"}}, + {"/user/:name/:id", &route{"username"}}, + {"/static/*filepath", &route{"static"}}, + }) + + data, params, found := router.Lookup("/") + // print `&main.route{name:"root"}, denco.Params(nil), true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/user/hoge") + // print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/user/hoge/7") + // print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/static/path/to/file") + // print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) +} +``` + +See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details. + +## Getting the value of path parameter + +You can get the value of path parameter by 2 ways. + +1. Using [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method +2. Find by loop + +```go +package main + +import ( + "fmt" + + "github.com/go-openapi/runtime/middleware/denco" +) + +func main() { + router := denco.New() + if err := router.Build([]denco.Record{ + {"/user/:name/:id", "route1"}, + }); err != nil { + panic(err) + } + + // 1. Using denco.Params.Get method. + _, params, _ := router.Lookup("/user/alice/1") + name := params.Get("name") + if name != "" { + fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". + } + + // 2. Find by loop. + for _, param := range params { + if param.Name == "name" { + fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". + } + } +} +``` + +## URL patterns + +Denco's route matching strategy is "most nearly matching". + +When routes `/:name` and `/alice` have been built, URI `/alice` matches the route `/alice`, not `/:name`. +Because URI `/alice` is more match with the route `/alice` than `/:name`. 
+ +For more example, when routes below have been built: + +``` +/user/alice +/user/:name +/user/:name/:id +/user/alice/:id +/user/:id/bob +``` + +Routes matching are: + +``` +/user/alice => "/user/alice" (no match with "/user/:name") +/user/bob => "/user/:name" +/user/naoina/1 => "/user/:name/1" +/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id") +/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id") +/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob") +``` + +## Limitation + +Denco has some limitations below. + +* Number of param records (such as `/:name`) must be less than 2^22 +* Number of elements of internal slice must be less than 2^22 + +## Benchmarks + + cd $GOPATH/github.com/go-openapi/runtime/middleware/denco + go test -bench . -benchmem + +## License + +Denco is licensed under the MIT License. diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go new file mode 100644 index 0000000000..5d2691ec36 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go @@ -0,0 +1,460 @@ +// Package denco provides fast URL router. +package denco + +import ( + "fmt" + "sort" + "strings" +) + +const ( + // ParamCharacter is a special character for path parameter. + ParamCharacter = ':' + + // WildcardCharacter is a special character for wildcard path parameter. + WildcardCharacter = '*' + + // TerminationCharacter is a special character for end of path. + TerminationCharacter = '#' + + // SeparatorCharacter separates path segments. + SeparatorCharacter = '/' + + // PathParamCharacter indicates a RESTCONF path param + PathParamCharacter = '=' + + // MaxSize is max size of records and internal slice. + MaxSize = (1 << 22) - 1 +) + +// Router represents a URL router. +type Router struct { + // SizeHint expects the maximum number of path parameters in records to Build. + // SizeHint will be used to determine the capacity of the memory to allocate. + // By default, SizeHint will be determined from given records to Build. + SizeHint int + + static map[string]interface{} + param *doubleArray +} + +// New returns a new Router. +func New() *Router { + return &Router{ + SizeHint: -1, + static: make(map[string]interface{}), + param: newDoubleArray(), + } +} + +// Lookup returns data and path parameters that associated with path. +// params is a slice of the Param that arranged in the order in which parameters appeared. +// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}]. +func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) { + if data, found := rt.static[path]; found { + return data, nil, true + } + if len(rt.param.node) == 1 { + return nil, nil, false + } + nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1) + if !found { + return nil, nil, false + } + for i := 0; i < len(params); i++ { + params[i].Name = nd.paramNames[i] + } + return nd.data, params, true +} + +// Build builds URL router from records. 
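+// A minimal client-side sketch (illustrative only; the keys and values here are
+// arbitrary): keys containing ':' or '*' segments are stored as parameterized
+// records in the double-array, purely static keys in a plain map, and Build has
+// to run before Lookup can find anything:
+//
+//	r := denco.New()
+//	if err := r.Build([]denco.Record{
+//		{Key: "/healthz", Value: "static"},
+//		{Key: "/user/:name", Value: "param"},
+//	}); err != nil {
+//		// e.g. more than MaxSize parameterized records, or a duplicated
+//		// path parameter name within a single key
+//	}
+//	if _, params, ok := r.Lookup("/user/alice"); ok {
+//		_ = params.Get("name") // "alice"
+//	}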
+func (rt *Router) Build(records []Record) error {
+	statics, params := makeRecords(records)
+	if len(params) > MaxSize {
+		return fmt.Errorf("denco: too many records")
+	}
+	if rt.SizeHint < 0 {
+		rt.SizeHint = 0
+		for _, p := range params {
+			size := 0
+			for _, k := range p.Key {
+				if k == ParamCharacter || k == WildcardCharacter {
+					size++
+				}
+			}
+			if size > rt.SizeHint {
+				rt.SizeHint = size
+			}
+		}
+	}
+	for _, r := range statics {
+		rt.static[r.Key] = r.Value
+	}
+	if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Param represents name and value of path parameter.
+type Param struct {
+	Name  string
+	Value string
+}
+
+// Params represents the name and value of path parameters.
+type Params []Param
+
+// Get gets the first value associated with the given name.
+// If there are no values associated with the key, Get returns "".
+func (ps Params) Get(name string) string {
+	for _, p := range ps {
+		if p.Name == name {
+			return p.Value
+		}
+	}
+	return ""
+}
+
+type doubleArray struct {
+	bc   []baseCheck
+	node []*node
+}
+
+func newDoubleArray() *doubleArray {
+	return &doubleArray{
+		bc:   []baseCheck{0},
+		node: []*node{nil}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node.
+	}
+}
+
+// baseCheck contains BASE, CHECK and Extra flags.
+// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
+//
+// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+// |----------------------|--|--------|
+// 32                    10  8        0
+type baseCheck uint32
+
+func (bc baseCheck) Base() int {
+	return int(bc >> 10)
+}
+
+func (bc *baseCheck) SetBase(base int) {
+	*bc |= baseCheck(base) << 10
+}
+
+func (bc baseCheck) Check() byte {
+	return byte(bc)
+}
+
+func (bc *baseCheck) SetCheck(check byte) {
+	*bc |= baseCheck(check)
+}
+
+func (bc baseCheck) IsEmpty() bool {
+	return bc&0xfffffcff == 0
+}
+
+func (bc baseCheck) IsSingleParam() bool {
+	return bc&paramTypeSingle == paramTypeSingle
+}
+
+func (bc baseCheck) IsWildcardParam() bool {
+	return bc&paramTypeWildcard == paramTypeWildcard
+}
+
+func (bc baseCheck) IsAnyParam() bool {
+	return bc&paramTypeAny != 0
+}
+
+func (bc *baseCheck) SetSingleParam() {
+	*bc |= (1 << 8)
+}
+
+func (bc *baseCheck) SetWildcardParam() {
+	*bc |= (1 << 9)
+}
+
+const (
+	paramTypeSingle   = 0x0100
+	paramTypeWildcard = 0x0200
+	paramTypeAny      = 0x0300
+)
+
+func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
+	indices := make([]uint64, 0, 1)
+	for i := 0; i < len(path); i++ {
+		if da.bc[idx].IsAnyParam() {
+			indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
+		}
+		c := path[i]
+		if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
+			goto BACKTRACKING
+		}
+	}
+	if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
+		return da.node[da.bc[next].Base()], params, true
+	}
+BACKTRACKING:
+	for j := len(indices) - 1; j >= 0; j-- {
+		i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
+		if da.bc[idx].IsSingleParam() {
+			idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+			if idx >= len(da.bc) {
+				break
+			}
+			next := NextSeparator(path, i)
+			params := append(params, Param{Value: path[i:next]})
+			if nd, params, found := da.lookup(path[next:], params, idx); found {
+				return nd, params, true
+			}
+		}
+		if da.bc[idx].IsWildcardParam() {
+			idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+			params := append(params,
Param{Value: path[i:]}) + return da.node[da.bc[idx].Base()], params, true + } + } + return nil, nil, false +} + +// build builds double-array from records. +func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error { + sort.Stable(recordSlice(srcs)) + base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) + if err != nil { + return err + } + if leaf != nil { + nd, err := makeNode(leaf) + if err != nil { + return err + } + da.bc[idx].SetBase(len(da.node)) + da.node = append(da.node, nd) + } + for _, sib := range siblings { + da.setCheck(nextIndex(base, sib.c), sib.c) + } + for _, sib := range siblings { + records := srcs[sib.start:sib.end] + switch sib.c { + case ParamCharacter: + for _, r := range records { + next := NextSeparator(r.Key, depth+1) + name := r.Key[depth+1 : next] + r.paramNames = append(r.paramNames, name) + r.Key = r.Key[next:] + } + da.bc[idx].SetSingleParam() + if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { + return err + } + case WildcardCharacter: + r := records[0] + name := r.Key[depth+1 : len(r.Key)-1] + r.paramNames = append(r.paramNames, name) + r.Key = "" + da.bc[idx].SetWildcardParam() + if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { + return err + } + default: + if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil { + return err + } + } + } + return nil +} + +// setBase sets BASE. +func (da *doubleArray) setBase(i, base int) { + da.bc[i].SetBase(base) +} + +// setCheck sets CHECK. +func (da *doubleArray) setCheck(i int, check byte) { + da.bc[i].SetCheck(check) +} + +// findEmptyIndex returns an index of unused BASE/CHECK node. +func (da *doubleArray) findEmptyIndex(start int) int { + i := start + for ; i < len(da.bc); i++ { + if da.bc[i].IsEmpty() { + break + } + } + return i +} + +// findBase returns good BASE. +func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { + for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { + base = nextIndex(idx, firstChar) + if _, used := usedBase[base]; used { + continue + } + i := 0 + for ; i < len(siblings); i++ { + next := nextIndex(base, siblings[i].c) + if len(da.bc) <= next { + da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) + } + if !da.bc[next].IsEmpty() { + break + } + } + if i == len(siblings) { + break + } + } + usedBase[base] = struct{}{} + return base +} + +func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { + siblings, leaf, err = makeSiblings(records, depth) + if err != nil { + return -1, nil, nil, err + } + if len(siblings) < 1 { + return -1, nil, leaf, nil + } + base = da.findBase(siblings, idx, usedBase) + if base > MaxSize { + return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice") + } + da.setBase(idx, base) + return base, siblings, leaf, err +} + +// node represents a node of Double-Array. +type node struct { + data interface{} + + // Names of path parameters. + paramNames []string +} + +// makeNode returns a new node from record. 
+func makeNode(r *record) (*node, error) { + dups := make(map[string]bool) + for _, name := range r.paramNames { + if dups[name] { + return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key) + } + dups[name] = true + } + return &node{data: r.Value, paramNames: r.paramNames}, nil +} + +// sibling represents an intermediate data of build for Double-Array. +type sibling struct { + // An index of start of duplicated characters. + start int + + // An index of end of duplicated characters. + end int + + // A character of sibling. + c byte +} + +// nextIndex returns a next index of array of BASE/CHECK. +func nextIndex(base int, c byte) int { + return base ^ int(c) +} + +// makeSiblings returns slice of sibling. +func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) { + var ( + pc byte + n int + ) + for i, r := range records { + if len(r.Key) <= depth { + leaf = r + continue + } + c := r.Key[depth] + switch { + case pc < c: + sib = append(sib, sibling{start: i, c: c}) + case pc == c: + continue + default: + return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted") + } + if n > 0 { + sib[n-1].end = i + } + pc = c + n++ + } + if n == 0 { + return nil, leaf, nil + } + sib[n-1].end = len(records) + return sib, leaf, nil +} + +// Record represents a record data for router construction. +type Record struct { + // Key for router construction. + Key string + + // Result value for Key. + Value interface{} +} + +// NewRecord returns a new Record. +func NewRecord(key string, value interface{}) Record { + return Record{ + Key: key, + Value: value, + } +} + +// record represents a record that use to build the Double-Array. +type record struct { + Record + paramNames []string +} + +// makeRecords returns the records that use to build Double-Arrays. +func makeRecords(srcs []Record) (statics, params []*record) { + termChar := string(TerminationCharacter) + paramPrefix := string(SeparatorCharacter) + string(ParamCharacter) + wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter) + restconfPrefix := string(PathParamCharacter) + string(ParamCharacter) + for _, r := range srcs { + if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) ||strings.Contains(r.Key, restconfPrefix){ + r.Key += termChar + params = append(params, &record{Record: r}) + } else { + statics = append(statics, &record{Record: r}) + } + } + return statics, params +} + +// recordSlice represents a slice of Record for sort and implements the sort.Interface. +type recordSlice []*record + +// Len implements the sort.Interface.Len. +func (rs recordSlice) Len() int { + return len(rs) +} + +// Less implements the sort.Interface.Less. +func (rs recordSlice) Less(i, j int) bool { + return rs[i].Key < rs[j].Key +} + +// Swap implements the sort.Interface.Swap. +func (rs recordSlice) Swap(i, j int) { + rs[i], rs[j] = rs[j], rs[i] +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/server.go new file mode 100644 index 0000000000..0886713c18 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/server.go @@ -0,0 +1,106 @@ +package denco + +import ( + "net/http" +) + +// Mux represents a multiplexer for HTTP request. +type Mux struct{} + +// NewMux returns a new Mux. +func NewMux() *Mux { + return &Mux{} +} + +// GET is shorthand of Mux.Handler("GET", path, handler). 
+func (m *Mux) GET(path string, handler HandlerFunc) Handler { + return m.Handler("GET", path, handler) +} + +// POST is shorthand of Mux.Handler("POST", path, handler). +func (m *Mux) POST(path string, handler HandlerFunc) Handler { + return m.Handler("POST", path, handler) +} + +// PUT is shorthand of Mux.Handler("PUT", path, handler). +func (m *Mux) PUT(path string, handler HandlerFunc) Handler { + return m.Handler("PUT", path, handler) +} + +// HEAD is shorthand of Mux.Handler("HEAD", path, handler). +func (m *Mux) HEAD(path string, handler HandlerFunc) Handler { + return m.Handler("HEAD", path, handler) +} + +// Handler returns a handler for HTTP method. +func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler { + return Handler{ + Method: method, + Path: path, + Func: handler, + } +} + +// Build builds a http.Handler. +func (m *Mux) Build(handlers []Handler) (http.Handler, error) { + recordMap := make(map[string][]Record) + for _, h := range handlers { + recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func)) + } + mux := newServeMux() + for m, records := range recordMap { + router := New() + if err := router.Build(records); err != nil { + return nil, err + } + mux.routers[m] = router + } + return mux, nil +} + +// Handler represents a handler of HTTP request. +type Handler struct { + // Method is an HTTP method. + Method string + + // Path is a routing path for handler. + Path string + + // Func is a function of handler of HTTP request. + Func HandlerFunc +} + +// The HandlerFunc type is aliased to type of handler function. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params) + +type serveMux struct { + routers map[string]*Router +} + +func newServeMux() *serveMux { + return &serveMux{ + routers: make(map[string]*Router), + } +} + +// ServeHTTP implements http.Handler interface. +func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + handler, params := mux.handler(r.Method, r.URL.Path) + handler(w, r, params) +} + +func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) { + if router, found := mux.routers[method]; found { + if handler, params, found := router.Lookup(path); found { + return handler.(HandlerFunc), params + } + } + return NotFound, nil +} + +// NotFound replies to the request with an HTTP 404 not found error. +// NotFound is called when unknown HTTP method or a handler not found. +// If you want to use the your own NotFound handler, please overwrite this variable. +var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) { + http.NotFound(w, r) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/util.go new file mode 100644 index 0000000000..edc1f6ab80 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/util.go @@ -0,0 +1,12 @@ +package denco + +// NextSeparator returns an index of next separator in path. 
+func NextSeparator(path string, start int) int { + for start < len(path) { + if c := path[start]; c == '/' || c == TerminationCharacter { + break + } + start++ + } + return start +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go new file mode 100644 index 0000000000..eaf90606ac --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go @@ -0,0 +1,62 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/*Package middleware provides the library with helper functions for serving swagger APIs. + +Pseudo middleware handler + + import ( + "net/http" + + "github.com/go-openapi/errors" + ) + + func newCompleteMiddleware(ctx *Context) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // use context to lookup routes + if matched, ok := ctx.RouteInfo(r); ok { + + if matched.NeedsAuth() { + if _, err := ctx.Authorize(r, matched); err != nil { + ctx.Respond(rw, r, matched.Produces, matched, err) + return + } + } + + bound, validation := ctx.BindAndValidate(r, matched) + if validation != nil { + ctx.Respond(rw, r, matched.Produces, matched, validation) + return + } + + result, err := matched.Handler.Handle(bound) + if err != nil { + ctx.Respond(rw, r, matched.Produces, matched, err) + return + } + + ctx.Respond(rw, r, matched.Produces, matched, result) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path)) + }) + } +*/ +package middleware diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/go18.go similarity index 100% rename from vendor/github.com/go-openapi/runtime/middleware/go18.go rename to test/tools/vendor/github.com/go-openapi/runtime/middleware/go18.go diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go new file mode 100644 index 0000000000..e069743e30 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go @@ -0,0 +1,329 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// this file was taken from the github.com/golang/gddo repository + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" + "time" +) + +// Octet types from RFC 2616. 
+var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// Copy returns a shallow copy of the header. +func Copy(header http.Header) http.Header { + h := make(http.Header) + for k, vs := range header { + h[k] = vs + } + return h +} + +var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC} + +// ParseTime parses the header as time. The zero value is returned if the +// header is not present or there is an error parsing the +// header. +func ParseTime(header http.Header, key string) time.Time { + if s := header.Get(key); s != "" { + for _, layout := range timeLayouts { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC() + } + } + } + return time.Time{} +} + +// ParseList parses a comma separated list of values. Commas are ignored in +// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is +// trimmed. +func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { + return parseValueAndParams(header.Get(key)) +} + +func parseValueAndParams(s string) (value string, params map[string]string) { + params = make(map[string]string) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec ... +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept2 ... 
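+// It parses the named header via ParseList: each element is split into a value
+// and its parameters, an optional "q" parameter sets the quality (default 1.0),
+// and elements whose "q" value fails to parse are skipped.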
+func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { + for _, en := range ParseList(header, key) { + v, p := parseValueAndParams(en) + var spec AcceptSpec + spec.Value = v + spec.Q = 1.0 + if p != nil { + if q, ok := p["q"]; ok { + spec.Q, _ = expectQuality(q) + } + } + if spec.Q < 0.0 { + continue + } + specs = append(specs, spec) + } + + return +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") { + s = skipSpace(s[1:]) + } + if strings.HasPrefix(s, "q=") { + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + // q is already 0 + s = s[1:] + case s[0] == '1': + s = s[1:] + q = 1 + case s[0] == '.': + // q is already 0 + default: + return -1, "" + } + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/negotiate.go new file mode 100644 index 0000000000..a9b6f27d3d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/negotiate.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. 
+ +// this file was taken from the github.com/golang/gddo repository + +package middleware + +import ( + "net/http" + "strings" + + "github.com/go-openapi/runtime/middleware/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} + +// NegotiateContentType returns the best offered content type for the request's +// Accept header. If two offers match with equal weight, then the more specific +// offer is preferred. For example, text/* trumps */*. If two offers match +// with equal weight and specificity, then the offer earlier in the list is +// preferred. If no offers match, then defaultOffer is returned. +func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string { + bestOffer := defaultOffer + bestQ := -1.0 + bestWild := 3 + specs := header.ParseAccept(r.Header, "Accept") + for _, rawOffer := range offers { + offer := normalizeOffer(rawOffer) + // No Accept header: just return the first offer. + if len(specs) == 0 { + return rawOffer + } + for _, spec := range specs { + switch { + case spec.Q == 0.0: + // ignore + case spec.Q < bestQ: + // better match found + case spec.Value == "*/*": + if spec.Q > bestQ || bestWild > 2 { + bestQ = spec.Q + bestWild = 2 + bestOffer = rawOffer + } + case strings.HasSuffix(spec.Value, "/*"): + if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) && + (spec.Q > bestQ || bestWild > 1) { + bestQ = spec.Q + bestWild = 1 + bestOffer = rawOffer + } + default: + if spec.Value == offer && + (spec.Q > bestQ || bestWild > 0) { + bestQ = spec.Q + bestWild = 0 + bestOffer = rawOffer + } + } + } + } + return bestOffer +} + +func normalizeOffers(orig []string) (norm []string) { + for _, o := range orig { + norm = append(norm, normalizeOffer(o)) + } + return +} + +func normalizeOffer(orig string) string { + return strings.SplitN(orig, ";", 2)[0] +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go new file mode 100644 index 0000000000..bc6942a0f1 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go @@ -0,0 +1,67 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package middleware + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +type errorResp struct { + code int + response interface{} + headers http.Header +} + +func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + for k, v := range e.headers { + for _, val := range v { + rw.Header().Add(k, val) + } + } + if e.code > 0 { + rw.WriteHeader(e.code) + } else { + rw.WriteHeader(http.StatusInternalServerError) + } + if err := producer.Produce(rw, e.response); err != nil { + Logger.Printf("failed to write error response: %v", err) + } +} + +// NotImplemented the error response when the response is not implemented +func NotImplemented(message string) Responder { + return Error(http.StatusNotImplemented, message) +} + +// Error creates a generic responder for returning errors, the data will be serialized +// with the matching producer for the request +func Error(code int, data interface{}, headers ...http.Header) Responder { + var hdr http.Header + for _, h := range headers { + for k, v := range h { + if hdr == nil { + hdr = make(http.Header) + } + hdr[k] = v + } + } + return &errorResp{ + code: code, + response: data, + headers: hdr, + } +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/operation.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/operation.go new file mode 100644 index 0000000000..1175a63cf2 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/operation.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import "net/http" + +// NewOperationExecutor creates a context aware middleware that handles the operations after routing +func NewOperationExecutor(ctx *Context) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // use context to lookup routes + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + route.Handler.ServeHTTP(rw, r) + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go new file mode 100644 index 0000000000..9aaf65958a --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -0,0 +1,485 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package middleware
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+
+	"github.com/go-openapi/runtime"
+)
+
+const defaultMaxMemory = 32 << 20
+
+var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
+	binder := new(untypedParamBinder)
+	binder.Name = param.Name
+	binder.parameter = &param
+	binder.formats = formats
+	if param.In != "body" {
+		binder.validator = validate.NewParamValidator(&param, formats)
+	} else {
+		binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
+	}
+
+	return binder
+}
+
+type untypedParamBinder struct {
+	parameter *spec.Parameter
+	formats   strfmt.Registry
+	Name      string
+	validator validate.EntityValidator
+}
+
+func (p *untypedParamBinder) Type() reflect.Type {
+	return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
+}
+
+func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
+	switch tpe {
+	case "boolean":
+		return reflect.TypeOf(true)
+
+	case "string":
+		if tt, ok := p.formats.GetType(format); ok {
+			return tt
+		}
+		return reflect.TypeOf("")
+
+	case "integer":
+		switch format {
+		case "int8":
+			return reflect.TypeOf(int8(0))
+		case "int16":
+			return reflect.TypeOf(int16(0))
+		case "int32":
+			return reflect.TypeOf(int32(0))
+		case "int64":
+			return reflect.TypeOf(int64(0))
+		default:
+			return reflect.TypeOf(int64(0))
+		}
+
+	case "number":
+		switch format {
+		case "float":
+			return reflect.TypeOf(float32(0))
+		case "double":
+			return reflect.TypeOf(float64(0))
+		}
+
+	case "array":
+		if items == nil {
+			return nil
+		}
+		itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
+		if itemsType == nil {
+			return nil
+		}
+		return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
+
+	case "file":
+		return reflect.TypeOf(&runtime.File{}).Elem()
+
+	case "object":
+		return reflect.TypeOf(map[string]interface{}{})
+	}
+	return nil
+}
+
+func (p *untypedParamBinder) allowsMulti() bool {
+	return p.parameter.In == "query" || p.parameter.In == "formData"
+}
+
+func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
+	name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
+	if tpe == "array" {
+		if cf == "multi" {
+			if !p.allowsMulti() {
+				return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
+			}
+			vv, hasKey, _ := values.GetOK(name)
+			return vv, false, hasKey, nil
+		}
+
+		v, hk, hv := values.GetOK(name)
+		if !hv {
+			return nil, false, hk, nil
+		}
+		d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
+		return d, c, hk, e
+	}
+
+	vv, hk, _ := values.GetOK(name)
+	return vv, false, hk, nil
+}
+
+func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
+	// fmt.Println("binding", p.name, "as", p.Type())
+	switch p.parameter.In {
+	case "query":
+		data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
+		if err != nil {
+			return err
+		}
+		if custom {
+			return nil
+		}
+
+		return p.bindValue(data, hasKey, target)
+
+	case "header":
+		data, custom, hasKey, err :=
p.readValue(runtime.Values(request.Header), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "path": + data, custom, hasKey, err := p.readValue(routeParams, target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "formData": + var err error + var mt string + + mt, _, e := runtime.ContentType(request.Header) + if e != nil { + // because of the interface conversion go thinks the error is not nil + // so we first check for nil and then set the err var if it's not nil + err = e + } + + if err != nil { + return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { + return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt == "multipart/form-data" { + if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + } + + if err = request.ParseForm(); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + + if p.parameter.Type == "file" { + file, header, ffErr := request.FormFile(p.parameter.Name) + if ffErr != nil { + if p.parameter.Required { + return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + } else { + return nil + } + } + target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) + return nil + } + + if request.MultipartForm != nil { + data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) + if rvErr != nil { + return rvErr + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + } + data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "body": + newValue := reflect.New(target.Type()) + if !runtime.HasBody(request) { + if p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + } + + return nil + } + if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { + if err == io.EOF && p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + return nil + } + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) + } + target.Set(reflect.Indirect(newValue)) + return nil + default: + return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) + } +} + +func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { + if p.parameter.Type == "array" { + return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) + } + var d string + if len(data) > 0 { + d = data[len(data)-1] + } + return p.setFieldValue(target, p.parameter.Default, d, hasKey) +} + +func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + + if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { + return errors.Required(p.Name, p.parameter.In, data) + } + + ok, err := p.tryUnmarshaler(target, defaultValue, data) + if err != nil 
{ + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if ok { + return nil + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if tpe == "byte" { + if data == "" { + if target.CanSet() { + target.SetBytes(defVal.Bytes()) + } + return nil + } + + b, err := base64.StdEncoding.DecodeString(data) + if err != nil { + b, err = base64.URLEncoding.DecodeString(data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + } + if target.CanSet() { + target.SetBytes(b) + } + return nil + } + + switch target.Kind() { + case reflect.Bool: + if data == "" { + if target.CanSet() { + target.SetBool(defVal.Bool()) + } + return nil + } + b, err := swag.ConvertBool(data) + if err != nil { + return err + } + if target.CanSet() { + target.SetBool(b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(int64(0))) + target.SetInt(rd.Int()) + } + return nil + } + i, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowInt(i) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetInt(i) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(uint64(0))) + target.SetUint(rd.Uint()) + } + return nil + } + u, err := strconv.ParseUint(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowUint(u) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetUint(u) + } + + case reflect.Float32, reflect.Float64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(float64(0))) + target.SetFloat(rd.Float()) + } + return nil + } + f, err := strconv.ParseFloat(data, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowFloat(f) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetFloat(f) + } + + case reflect.String: + value := data + if value == "" { + value = defVal.String() + } + // validate string + if target.CanSet() { + target.SetString(value) + } + + case reflect.Ptr: + if data == "" && defVal.Kind() == reflect.Ptr { + if target.CanSet() { + target.Set(defVal) + } + return nil + } + newVal := reflect.New(target.Type().Elem()) + if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { + return err + } + if target.CanSet() { + target.Set(newVal) + } + + default: + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + return nil +} + +func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { + if !target.CanSet() { + return false, nil + } + // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more + if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { + if defaultValue != nil && len(data) == 0 { + target.Set(reflect.ValueOf(defaultValue)) + return true, nil + } + value := reflect.New(target.Type()) + if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { + return true, err + } + target.Set(reflect.Indirect(value)) + 
return true, nil + } + return false, nil +} + +func (p *untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { + ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) + if err != nil { + return nil, true, err + } + if ok { + return nil, true, nil + } + + return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil +} + +func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { + sz := len(data) + if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { + return errors.Required(p.Name, p.parameter.In, data) + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if !target.CanSet() { + return nil + } + if sz == 0 { + target.Set(defVal) + return nil + } + + value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) + + for i := 0; i < sz; i++ { + if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { + return err + } + } + + target.Set(value) + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go similarity index 100% rename from vendor/github.com/go-openapi/runtime/middleware/pre_go18.go rename to test/tools/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go new file mode 100644 index 0000000000..4be330d6dc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go @@ -0,0 +1,90 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RapiDocOpts configures the RapiDoc middlewares +type RapiDocOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL the url to find the spec for + SpecURL string + // RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js + RapiDocURL string + // Title for the documentation site, default to: API documentation + Title string +} + +// EnsureDefaults in case some options are missing +func (r *RapiDocOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.RapiDocURL == "" { + r.RapiDocURL = rapidocLatest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// RapiDoc creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener. 
+// +func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js" + rapidocTemplate = ` + + + {{ .Title }} + + + + + + + +` +) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go new file mode 100644 index 0000000000..019c854295 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -0,0 +1,103 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RedocOpts configures the Redoc middlewares +type RedocOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL the url to find the spec for + SpecURL string + // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js + RedocURL string + // Title for the documentation site, default to: API documentation + Title string +} + +// EnsureDefaults in case some options are missing +func (r *RedocOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.RedocURL == "" { + r.RedocURL = redocLatest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// Redoc creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener. 
+// +func Redoc(opts RedocOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("redoc").Parse(redocTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" + redocTemplate = ` + + + {{ .Title }} + + + + + + + + + + + + + +` +) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go new file mode 100644 index 0000000000..760c37861d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go @@ -0,0 +1,104 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// UntypedRequestBinder binds and validates the data from a http request +type UntypedRequestBinder struct { + Spec *spec.Swagger + Parameters map[string]spec.Parameter + Formats strfmt.Registry + paramBinders map[string]*untypedParamBinder +} + +// NewUntypedRequestBinder creates a new binder for reading a request. 
+func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { + binders := make(map[string]*untypedParamBinder) + for fieldName, param := range parameters { + binders[fieldName] = newUntypedParamBinder(param, spec, formats) + } + return &UntypedRequestBinder{ + Parameters: parameters, + paramBinders: binders, + Spec: spec, + Formats: formats, + } +} + +// Bind perform the databinding and validation +func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error { + val := reflect.Indirect(reflect.ValueOf(data)) + isMap := val.Kind() == reflect.Map + var result []error + debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) + for fieldName, param := range o.Parameters { + binder := o.paramBinders[fieldName] + debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) + var target reflect.Value + if !isMap { + binder.Name = fieldName + target = val.FieldByName(fieldName) + } + + if isMap { + tpe := binder.Type() + if tpe == nil { + if param.Schema.Type.Contains("array") { + tpe = reflect.TypeOf([]interface{}{}) + } else { + tpe = reflect.TypeOf(map[string]interface{}{}) + } + } + target = reflect.Indirect(reflect.New(tpe)) + } + + if !target.IsValid() { + result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name)) + continue + } + + if err := binder.Bind(request, routeParams, consumer, target); err != nil { + result = append(result, err) + continue + } + + if binder.validator != nil { + rr := binder.validator.Validate(target.Interface()) + if rr != nil && rr.HasErrors() { + result = append(result, rr.AsError()) + } + } + + if isMap { + val.SetMapIndex(reflect.ValueOf(param.Name), target) + } + } + + if len(result) > 0 { + return errors.CompositeValidationError(result...) + } + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go new file mode 100644 index 0000000000..5052031c8d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -0,0 +1,488 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "fmt" + "net/http" + fpath "path" + "regexp" + "strings" + + "github.com/go-openapi/runtime/security" + "github.com/go-openapi/swag" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware/denco" +) + +// RouteParam is a object to capture route params in a framework agnostic way. 
+// implementations of the muxer should use these route params to communicate with the +// swagger framework +type RouteParam struct { + Name string + Value string +} + +// RouteParams the collection of route params +type RouteParams []RouteParam + +// Get gets the value for the route param for the specified key +func (r RouteParams) Get(name string) string { + vv, _, _ := r.GetOK(name) + if len(vv) > 0 { + return vv[len(vv)-1] + } + return "" +} + +// GetOK gets the value but also returns booleans to indicate if a key or value +// is present. This aids in validation and satisfies an interface in use there +// +// The returned values are: data, has key, has value +func (r RouteParams) GetOK(name string) ([]string, bool, bool) { + for _, p := range r { + if p.Name == name { + return []string{p.Value}, true, p.Value != "" + } + } + return nil, false, false +} + +// NewRouter creates a new context aware router middleware +func NewRouter(ctx *Context, next http.Handler) http.Handler { + if ctx.router == nil { + ctx.router = DefaultRouter(ctx.spec, ctx.api) + } + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if _, rCtx, ok := ctx.RouteInfo(r); ok { + next.ServeHTTP(rw, rCtx) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + }) +} + +// RoutableAPI represents an interface for things that can serve +// as a provider of implementations for the swagger router +type RoutableAPI interface { + HandlerFor(string, string) (http.Handler, bool) + ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) + ConsumersFor([]string) map[string]runtime.Consumer + ProducersFor([]string) map[string]runtime.Producer + AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator + Authorizer() runtime.Authorizer + Formats() strfmt.Registry + DefaultProduces() string + DefaultConsumes() string +} + +// Router represents a swagger aware router +type Router interface { + Lookup(method, path string) (*MatchedRoute, bool) + OtherMethods(method, path string) []string +} + +type defaultRouteBuilder struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record +} + +type defaultRouter struct { + spec *loads.Document + routers map[string]*denco.Router +} + +func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { + return &defaultRouteBuilder{ + spec: spec, + analyzer: analysis.New(spec.Spec()), + api: api, + records: make(map[string][]denco.Record), + } +} + +// DefaultRouter creates a default implemenation of the router +func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { + builder := newDefaultRouteBuilder(spec, api) + if spec != nil { + for method, paths := range builder.analyzer.Operations() { + for path, operation := range paths { + fp := fpath.Join(spec.BasePath(), path) + debugLog("adding route %s %s %q", method, fp, operation.ID) + builder.AddRoute(method, fp, operation) + } + } + } + return builder.Build() +} + +// RouteAuthenticator is an authenticator that can compose several authenticators together. +// It also knows when it contains an authenticator that allows for anonymous pass through. 
+// Contains a group of 1 or more authenticators that have a logical AND relationship +type RouteAuthenticator struct { + Authenticator map[string]runtime.Authenticator + Schemes []string + Scopes map[string][]string + allScopes []string + commonScopes []string + allowAnonymous bool +} + +func (ra *RouteAuthenticator) AllowsAnonymous() bool { + return ra.allowAnonymous +} + +// AllScopes returns a list of unique scopes that is the combination +// of all the scopes in the requirements +func (ra *RouteAuthenticator) AllScopes() []string { + return ra.allScopes +} + +// CommonScopes returns a list of unique scopes that are common in all the +// scopes in the requirements +func (ra *RouteAuthenticator) CommonScopes() []string { + return ra.commonScopes +} + +// Authenticate Authenticator interface implementation +func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + if ra.allowAnonymous { + route.Authenticator = ra + return true, nil, nil + } + // iterate in proper order + var lastResult interface{} + for _, scheme := range ra.Schemes { + if authenticator, ok := ra.Authenticator[scheme]; ok { + applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ + Request: req, + RequiredScopes: ra.Scopes[scheme], + }) + if !applies { + return false, nil, nil + } + if err != nil { + route.Authenticator = ra + return true, nil, err + } + lastResult = princ + } + } + route.Authenticator = ra + return true, lastResult, nil +} + +func stringSliceUnion(slices ...[]string) []string { + unique := make(map[string]struct{}) + var result []string + for _, slice := range slices { + for _, entry := range slice { + if _, ok := unique[entry]; ok { + continue + } + unique[entry] = struct{}{} + result = append(result, entry) + } + } + return result +} + +func stringSliceIntersection(slices ...[]string) []string { + unique := make(map[string]int) + var intersection []string + + total := len(slices) + var emptyCnt int + for _, slice := range slices { + if len(slice) == 0 { + emptyCnt++ + continue + } + + for _, entry := range slice { + unique[entry]++ + if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices + intersection = append(intersection, entry) + } + } + } + + return intersection +} + +// RouteAuthenticators represents a group of authenticators that represent a logical OR +type RouteAuthenticators []RouteAuthenticator + +// AllowsAnonymous returns true when there is an authenticator that means optional auth +func (ras RouteAuthenticators) AllowsAnonymous() bool { + for _, ra := range ras { + if ra.AllowsAnonymous() { + return true + } + } + return false +} + +// Authenticate method implemention so this collection can be used as authenticator +func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + var lastError error + var allowsAnon bool + var anonAuth RouteAuthenticator + + for _, ra := range ras { + if ra.AllowsAnonymous() { + anonAuth = ra + allowsAnon = true + continue + } + applies, usr, err := ra.Authenticate(req, route) + if !applies || err != nil || usr == nil { + if err != nil { + lastError = err + } + continue + } + return applies, usr, nil + } + + if allowsAnon && lastError == nil { + route.Authenticator = &anonAuth + return true, nil, lastError + } + return lastError != nil, nil, lastError +} + +type routeEntry struct { + PathPattern string + BasePath string + Operation *spec.Operation + Consumes []string + Consumers 
map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *UntypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + debugLog("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + debugLog("there are no known routers") + } + for meth := range d.routers { + debugLog("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := pathUnescape(p.Value) + if err != nil { + debugLog("failed to escape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + debugLog("couldn't find a route by path for %s %s", method, path) + } + } else { + debugLog("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = 
append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + debugLog("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + // add API defaults if not part of the spec + if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) { + consumes = append(consumes, defConsumes) + } + + if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) { + produces = append(produces, defProduces) + } + + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + var auths []RouteAuthenticator + for _, reqs := range requirements { + var schemes []string + scopes := make(map[string][]string, len(reqs)) + var scopeSlices [][]string + for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return auths +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + } +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/security.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 0000000000..2b061caefc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,39 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 0000000000..f029142980 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,48 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "path" +) + +// Spec creates a middleware to serve a swagger spec. +// This allows for altering the spec before starting the http listener. 
+// This can be useful if you want to serve the swagger spec from another path than /swagger.json +// +func Spec(basePath string, b []byte, next http.Handler) http.Handler { + if basePath == "" { + basePath = "/" + } + pth := path.Join(basePath, "swagger.json") + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + //#nosec + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusNotFound) + return + } + next.ServeHTTP(rw, r) + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go new file mode 100644 index 0000000000..b4dea29e4b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -0,0 +1,168 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// SwaggerUIOpts configures the Swaggerui middlewares +type SwaggerUIOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL the url to find the spec for + SpecURL string + // OAuthCallbackURL the url called after OAuth2 login + OAuthCallbackURL string + + // The three components needed to embed swagger-ui + SwaggerURL string + SwaggerPresetURL string + SwaggerStylesURL string + + Favicon32 string + Favicon16 string + + // Title for the documentation site, default to: API documentation + Title string +} + +// EnsureDefaults in case some options are missing +func (r *SwaggerUIOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.OAuthCallbackURL == "" { + r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") + } + if r.SwaggerURL == "" { + r.SwaggerURL = swaggerLatest + } + if r.SwaggerPresetURL == "" { + r.SwaggerPresetURL = swaggerPresetLatest + } + if r.SwaggerStylesURL == "" { + r.SwaggerStylesURL = swaggerStylesLatest + } + if r.Favicon16 == "" { + r.Favicon16 = swaggerFavicon16Latest + } + if r.Favicon32 == "" { + r.Favicon32 = swaggerFavicon32Latest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener. 
+func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, &opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Join(r.URL.Path) == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" + swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" + swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" + swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" + swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" + swaggeruiTemplate = ` + + + + + {{ .Title }} + + + + + + + + +
+ + + + + + +` +) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go new file mode 100644 index 0000000000..576f6003f7 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -0,0 +1,122 @@ +package middleware + +import ( + "bytes" + "fmt" + "net/http" + "path" + "text/template" +) + +func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := opts.OAuthCallbackURL + tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, &opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Join(r.URL.Path) == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + swaggerOAuthTemplate = ` + + + + {{ .Title }} + + + + + +` +) diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 0000000000..39a85f7d9e --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,286 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
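The documentation middlewares above (Spec, Redoc, RapiDoc, SwaggerUI) are designed to be chained in front of an API handler. A minimal sketch, assuming a swagger.json file on disk and a placeholder API handler; the paths and listen address are hypothetical:

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// Raw spec bytes; Spec serves them at <basePath>/swagger.json.
	spec, err := os.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// Stand-in for the real API mux.
	var api http.Handler = http.NotFoundHandler()

	// Redoc serves a documentation UI at /docs, falling through to the API otherwise.
	docs := middleware.Redoc(middleware.RedocOpts{SpecURL: "/swagger.json"}, api)

	// Spec serves the raw document at /swagger.json, falling through to docs otherwise.
	handler := middleware.Spec("/", spec, docs)

	log.Fatal(http.ListenAndServe(":8080", handler))
}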
+ +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: make(map[string]runtime.Consumer, 10), + producers: make(map[string]runtime.Producer, 10), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() interface{}), + formats: strfmt.NewFormats(), + } + return api.WithJSONDefaults() +} + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() interface{} + formats strfmt.Registry +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, 10) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, 10) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for an operation name +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, 30) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified id if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validateWith validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + var consumes []string + for k := range d.consumers { + consumes = append(consumes, k) + } + + var produces []string + for k := range d.producers { + produces = append(produces, k) + } + + var authenticators []string + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + var operations []string + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + var definedAuths []string + for k := range d.spec.Spec().SecurityDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations, 
d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + var unregistered []string + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 0000000000..1f0135b578 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,126 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
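The untyped API above is wired up by registering consumers, producers, and one handler per operation, then calling Validate to cross-check the registrations against the spec. A minimal sketch; the spec path, operation, and payload are hypothetical, and runtime.OperationHandlerFunc is assumed to be the function adapter for the OperationHandler interface from the same runtime package:

package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	// Load and expand the swagger document.
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// NewAPI registers JSON consumers and producers by default.
	api := untyped.NewAPI(doc)

	// Untyped handlers receive the already-bound, validated parameters.
	api.RegisterOperation("get", "/ping", runtime.OperationHandlerFunc(func(params interface{}) (interface{}, error) {
		return map[string]string{"status": "ok"}, nil
	}))

	// Validate reports operations, media types, or security schemes that the
	// spec requires but the API has not registered, and vice versa.
	if err := api.Validate(); err != nil {
		log.Fatal(err)
	}
}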
+ +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + + "github.com/go-openapi/runtime" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]interface{} +} + +// ContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if swag.ContainsStringsCI(allowed, mt) { + return nil + } + if swag.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]interface{}), + } + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) parameters() { + debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]interface{}) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + // if the route provides values for Produces and no format could be identify then return an error. + // if the route does not specify values for Produces then treat request as valid since the API designer + // choose not to specify the format for responses. 
+ if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/request.go b/test/tools/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 0000000000..078fda1739 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,139 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bufio" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this method needs a content-type +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("content-length") != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable 
interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go b/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 0000000000..c3ffdac7e8 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,276 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (interface{}, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (interface{}, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (interface{}, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + 
oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { + return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. +// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
+// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. +func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/security/authorizer.go b/test/tools/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 0000000000..00c1a4d6a4 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,27 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
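// A minimal usage sketch for the authenticators defined in security/authenticator.go
// above; it is not part of the vendored diff. The principal type, the credential
// checks and the "X-API-Key" header name are assumptions made up for illustration;
// only BasicAuth, APIKeyAuth and the Authenticate method (from runtime.Authenticator)
// are taken from the package.
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/security"
)

type principal struct{ Name string }

func main() {
	// Basic auth: the callback receives the decoded user/password pair.
	basic := security.BasicAuth(func(user, pass string) (interface{}, error) {
		if user == "admin" && pass == "secret" { // hypothetical credentials
			return &principal{Name: user}, nil
		}
		return nil, errors.New("forbidden")
	})

	// API key auth: the token is read from a header ("query" would read a query parameter).
	apiKey := security.APIKeyAuth("X-API-Key", "header", func(token string) (interface{}, error) {
		if token == "expected-token" { // hypothetical token
			return &principal{Name: "api-client"}, nil
		}
		return nil, errors.New("forbidden")
	})

	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	req.SetBasicAuth("admin", "secret")
	req.Header.Set("X-API-Key", "expected-token")

	// Authenticate returns (applies, principal, error); applies is false when the
	// authenticator found no credentials of its kind on the request.
	if ok, p, err := basic.Authenticate(req); ok && err == nil {
		fmt.Printf("basic auth principal: %+v\n", p)
	}
	if ok, p, err := apiKey.Authenticate(req); ok && err == nil {
		fmt.Printf("api key principal: %+v\n", p)
	}
}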
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/statuses.go b/test/tools/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 0000000000..3b011a0bff --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth 
Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/text.go b/test/tools/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 0000000000..f33320b7dd --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,116 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/values.go b/test/tools/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 0000000000..11f5732af4 --- /dev/null +++ 
b/test/tools/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/test/tools/vendor/github.com/go-openapi/runtime/xml.go b/test/tools/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 0000000000..821c7393df --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/.editorconfig b/test/tools/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/test/tools/vendor/github.com/go-openapi/spec/.gitignore b/test/tools/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 0000000000..dd91ed6a04 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml b/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 0000000000..835d55e742 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,42 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + 
min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort diff --git a/test/tools/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/test/tools/vendor/github.com/go-openapi/spec/LICENSE b/test/tools/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/tools/vendor/github.com/go-openapi/spec/README.md b/test/tools/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 0000000000..18782c6daf --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,34 @@ +# OAI object model + +[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) + +[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages ? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
+> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/test/tools/vendor/github.com/go-openapi/spec/appveyor.yml similarity index 100% rename from vendor/github.com/go-openapi/spec/appveyor.yml rename to test/tools/vendor/github.com/go-openapi/spec/appveyor.yml diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/test/tools/vendor/github.com/go-openapi/spec/bindata.go similarity index 100% rename from vendor/github.com/go-openapi/spec/bindata.go rename to test/tools/vendor/github.com/go-openapi/spec/bindata.go diff --git a/test/tools/vendor/github.com/go-openapi/spec/cache.go b/test/tools/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 0000000000..122993b44b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. 
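// An illustrative sketch, not part of the vendored file: any type exposing Get/Set
// with these shapes satisfies ResolutionCache, so callers may pass their own cache
// to the expansion entry points instead of relying on the shared package-level one.
// The customCache name is hypothetical; the sketch relies on this file's existing
// "sync" import.
type customCache struct {
	mu    sync.RWMutex
	store map[string]interface{}
}

func (c *customCache) Get(uri string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.store[uri]
	return v, ok
}

func (c *customCache) Set(uri string, data interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.store == nil {
		c.store = make(map[string]interface{})
	}
	c.store[uri] = data
}

// compile-time check that the sketch implements the interface from cache.go
var _ ResolutionCache = &customCache{}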
+func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/contact_info.go b/test/tools/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 0000000000..2f7bb219b5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/debug.go b/test/tools/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 0000000000..fc889f6d0b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. 
+// +// It enables a more verbose logging of this package. +var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/errors.go b/test/tools/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 0000000000..6992c7ba73 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/test/tools/vendor/github.com/go-openapi/spec/expander.go b/test/tools/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 0000000000..d4ea889d44 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,594 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. +// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +// +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. 
This is a file, not a directory + SkipSchemas bool // do not expand schemas, just paths, parameters and responses + ContinueOnError bool // continue expanding even after and error is found + PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document + AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs +} + +func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { + if opts != nil { + clone := *opts // shallow clone to avoid internal changes to be propagated to the caller + if clone.RelativeBase != "" { + clone.RelativeBase = normalizeBase(clone.RelativeBase) + } + // if the relative base is empty, let the schema loader choose a pseudo root document + return &clone + } + return &ExpandOptions{} +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(spec, options, nil, nil) + + specBasePath := options.RelativeBase + + if !options.SkipSchemas { + for key, definition := range spec.Definitions { + parentRefs := make([]string, 0, 10) + parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) + + def, err := expandSchema(definition, parentRefs, resolver, specBasePath) + if resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + pth := spec.Paths.Paths[key] + if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = pth + } + } + + return nil +} + +const rootBase = ".root" + +// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry +// for further $ref resolution +// +// Setting the cache is optional and this parameter may safely be left to nil. +func baseForRoot(root interface{}, cache ResolutionCache) string { + if root == nil { + return "" + } + + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + cache.Set(normalizedBase, root) + + return normalizedBase +} + +// ExpandSchema expands the refs in the schema object with reference to the root object. +// +// go-openapi/validate uses this function. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandSchemaWithBasePath to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. 
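// An illustrative sketch, not part of the vendored file: expanding every $ref in a
// raw swagger document with ExpandSpec (defined above). The expandSpecExample helper
// and its parameters are made up for this example; only Swagger, ExpandOptions and
// ExpandSpec come from this package, and encoding/json is already imported here.
func expandSpecExample(raw []byte, basePath string) (*Swagger, error) {
	doc := new(Swagger)
	if err := json.Unmarshal(raw, doc); err != nil {
		return nil, err
	}
	opts := &ExpandOptions{
		RelativeBase:    basePath, // e.g. "./swagger.json": relative $ref's resolve against this file
		SkipSchemas:     false,    // also expand the schemas under definitions
		ContinueOnError: false,    // fail fast on the first resolution error
	}
	if err := ExpandSpec(doc, opts); err != nil {
		return nil, err
	}
	return doc, nil
}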
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + 
} + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. 
+ // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + var t *Schema + err := resolver.Resolve(&target.Ref, &t, basePath) + if resolver.shouldStopOnError(err) { + return nil, err + } + + if t == nil { + // guard for when continuing on error + return &target, nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + if pathItem.Ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) + basePath = transitiveResolver.updateBasePath(resolver, basePath) + resolver = transitiveResolver + } + + pathItem.Ref = Ref{} + for i := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses == nil { + return nil + } + + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandResponse to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. 
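// An illustrative sketch, not part of the vendored file: expanding a single response
// whose schema is a $ref into the root document's definitions, via
// ExpandResponseWithRoot (declared just below). "#/definitions/Error" is a
// hypothetical definition name; Response, Schema and MustCreateRef come from other
// files of this package.
func expandErrorResponseExample(root *Swagger) (*Response, error) {
	resp := &Response{
		ResponseProps: ResponseProps{
			Description: "an error payload",
			Schema:      &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef("#/definitions/Error")}},
		},
	}
	// passing a nil cache is allowed: the package falls back to its shared cache
	if err := ExpandResponseWithRoot(resp, root, nil); err != nil {
		return nil, err
	}
	return resp, nil
}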
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + switch { + case resolver.isCircular(&rebasedRef, basePath, parentRefs...): + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + case !resolver.options.SkipSchemas: + // schema expanded to a $ref in another root + sch.Ref = 
rebasedRef + debugLog("rebased to: %s", sch.Ref.String()) + default: + // skip schema expansion but rebase $ref to schema + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } + } + + if ref != nil { + *ref = Ref{} + } + + // expand schema + if !resolver.options.SkipSchemas { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + if s == nil { + // guard for when continuing on error + return nil + } + *sch = *s + } + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/external_docs.go b/test/tools/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 0000000000..88add91b2b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/header.go b/test/tools/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 0000000000..9dfd17b185 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
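Aside on the expansion helpers vendored above (ExpandResponse, ExpandResponseWithRoot, ExpandParameter, ExpandParameterWithRoot): the snippet below is a minimal usage sketch, not part of the vendored diff. The in-memory root document, the response name and the expected output are invented for illustration.

package main

import (
    "fmt"
    "log"

    "github.com/go-openapi/spec"
)

func main() {
    // An in-memory root document; "#/responses/NotFound" below resolves against it.
    root := map[string]interface{}{
        "responses": map[string]interface{}{
            "NotFound": map[string]interface{}{"description": "resource not found"},
        },
    }

    // A response that is nothing but a $ref into the root document.
    resp := new(spec.Response)
    resp.Ref = spec.MustCreateRef("#/responses/NotFound")

    // Passing a nil cache lets the package allocate its own (see cacheOrDefault above).
    if err := spec.ExpandResponseWithRoot(resp, root, nil); err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp.Description) // expected: "resource not found"
}

ExpandResponse and ExpandParameter behave the same way, but resolve external documents relative to the basePath argument instead of a fixed in-memory root.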
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/info.go b/test/tools/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 0000000000..582f0fd4c4 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
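For orientation, the Header fluent setters vendored just above all return the receiver, so they chain; a minimal sketch (the header name, type and bound are invented):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/go-openapi/spec"
)

func main() {
    remaining := spec.ResponseHeader().
        Typed("integer", "int32").
        WithDescription("requests remaining in the current window").
        WithMinimum(0, false)

    out, err := json.Marshal(remaining)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out)) // serialized via Header.MarshalJSON above
}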
+ +package spec + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetInt gets a int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]interface{}) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/items.go b/test/tools/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 0000000000..e2afb2133b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
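As a quick illustration of the Extensions helpers and the VendorExtensible block vendored above (keys are lower-cased on Add, and only "x-"-prefixed keys survive MarshalJSON), a small sketch with an invented key and value:

package main

import (
    "fmt"

    "github.com/go-openapi/spec"
)

func main() {
    var info spec.Info
    info.AddExtension("x-audience", "internal") // stored under the lower-cased key

    if v, ok := info.Extensions.GetString("X-Audience"); ok {
        fmt.Println(v) // lookups are case-insensitive: prints "internal"
    }
}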
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/license.go b/test/tools/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 0000000000..b42f80368e --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/normalizer.go b/test/tools/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 0000000000..e8b6009945 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+// +// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: +// 'file:///c:/path/file.yaml' +// +// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or +// 'file:///c:\folder\File.json'. +// +// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" +// is attempted. +// +// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). +func normalizeURI(refPath, base string) string { + refURL, err := parseURL(refPath) + if err != nil { + specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) + refURL, refPath = repairURI(refPath) + } + + fixWindowsURI(refURL, refPath) // noop on non-windows OS + + refURL.Path = path.Clean(refURL.Path) + if refURL.Path == "." { + refURL.Path = "" + } + + r := MustCreateRef(refURL.String()) + if r.IsCanonical() { + return refURL.String() + } + + baseURL, _ := parseURL(base) + if path.IsAbs(refURL.Path) { + baseURL.Path = refURL.Path + } else if refURL.Path != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + + return baseURL.String() +} + +// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. +// +// When calling this, we assume that: +// * $ref is a canonical URI +// * originalRelativeBase is a canonical URI +// +// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. +// In this case, expansion stops and normally renders the internal canonical $ref. +// +// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. +// +// There is a special case for schemas that are anchored with an "id": +// in that case, the rebasing is performed // against the id only if this is an anchor for the initial root document. +// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. +func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { + debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + // short circuit: $ref to current doc + return *ref + } + + if id != "" { + idBaseURL, err := parseURL(id) + if err == nil { // if the schema id is not usable as a URI, ignore it + if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchaged (do not want $ref: "") + // $ref relative to the ID of the schema in the root document + return ref + } + } + } + + originalRelativeBaseURL, _ := parseURL(originalRelativeBase) + + r, _ := rebase(ref, originalRelativeBaseURL, false) + + return r +} + +func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { + var newBase url.URL + + u := ref.GetURL() + + if u.Scheme != v.Scheme || u.Host != v.Host { + return *ref, false + } + + docPath := v.Path + v.Path = path.Dir(v.Path) + + if v.Path == "." 
{ + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if strings.HasPrefix(u.Path, docPath) { + newBase.Path = strings.TrimPrefix(u.Path, docPath) + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 0000000000..2df0723154 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. 
+// +// The parameter must be a path, not an URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(u *url.URL, in string) { +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/normalizer_windows.go b/test/tools/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 0000000000..a66c532dbc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,154 @@ +// -build windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +} + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. +// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". 
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/operation.go b/test/tools/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 0000000000..995ce6acb1 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,397 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
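The normalizer functions above are unexported, so they cannot be exercised from outside the package; the sketch below is written as an in-package test purely to illustrate the canonicalization they perform. It is not part of the vendored code, and the path and test name are invented.

package spec

import "testing"

func TestNormalizeSketch(t *testing.T) {
    // normalizeBase anchors a relative path to the cwd and yields a file:// URI,
    // e.g. file:///<cwd>/specs/spec.json on unix-like systems.
    base := normalizeBase("specs/spec.json")

    // normalizeURI resolves a $ref against that canonical base.
    ref := normalizeURI("#/definitions/Pet", base)

    if base == "" || ref == "" {
        t.Fatalf("expected canonical URIs, got base=%q ref=%q", base, ref)
    }
    t.Logf("base=%s ref=%s", base, ref)
}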
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marhaller here to handle a special cases related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance. 
+// It expects an ID as parameter but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. 
+// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/parameter.go b/test/tools/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 0000000000..2b2b89b67b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,326 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. 
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/path_item.go b/test/tools/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 0000000000..68fc8e9014 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
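Pulling the Parameter and Operation builders vendored above together, a short sketch (names, codes and bounds are invented; it also assumes the package's Response type with its Description field, which comes from a file not shown in this hunk):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/go-openapi/spec"
)

func main() {
    limit := spec.QueryParam("limit").
        Typed("integer", "int32").
        WithDefault(20). // WithDefault implies AsOptional
        WithMinimum(1, false).
        WithMaximum(100, false)

    ok := new(spec.Response) // assumption: Response and its Description field live in response.go, not shown here
    ok.Description = "OK"

    op := spec.NewOperation("listItems").
        WithSummary("List items").
        RespondsWith(200, ok)
    op.AddParam(limit) // replaces any parameter with the same name and location

    out, err := json.MarshalIndent(op, "", "  ")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out))
}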
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/paths.go b/test/tools/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 0000000000..9dc82a2901 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/properties.go b/test/tools/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 0000000000..91d2435f01 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,91 @@ +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Name string + Schema +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the name schemas +// of the OrderSchemaItems slice, keeping the original order of the slice. 
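A small sketch of the key filtering performed by the custom Paths (un)marshaling above: only keys starting with "/" become path items, "x-..." keys are collected as vendor extensions, and anything else is dropped. Illustrative only; the empty operation object is just a placeholder:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        raw := []byte(`{
            "/pets": {"get": {}},
            "x-generator": "example",
            "ignored": {}
        }`)

        var paths spec.Paths
        if err := json.Unmarshal(raw, &paths); err != nil {
            panic(err)
        }

        // "/pets" is kept as a PathItem, "x-generator" lands in Extensions,
        // and "ignored" is silently discarded by the custom UnmarshalJSON.
        fmt.Println(len(paths.Paths), paths.Extensions["x-generator"])
    }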
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/ref.go b/test/tools/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 0000000000..b0ef9bd9c9 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
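The x-order handling above (properties.go) can be exercised end to end: unmarshaling a schema (the Schema type and its JSON handling appear further below in schema.go) records the extension on each property, and marshaling the SchemaProperties map re-emits the keys in that order rather than alphabetically. A sketch, not part of the vendored file:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        raw := []byte(`{
            "type": "object",
            "properties": {
                "zebra": {"type": "string", "x-order": 1},
                "alpha": {"type": "string", "x-order": 2}
            }
        }`)

        var s spec.Schema
        if err := json.Unmarshal(raw, &s); err != nil {
            panic(err)
        }

        // SchemaProperties.MarshalJSON sorts by the x-order extension, so
        // "zebra" is emitted before "alpha" despite map iteration order.
        out, err := json.Marshal(s.Properties)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }

Properties without a usable x-order value fall back to name ordering, per the Less implementation above.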
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return "" + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + //nolint:noctx,gosec + rr, err := http.Get(v) + if err != nil { + return false + } + defer rr.Body.Close() + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/resolver.go b/test/tools/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 0000000000..47d1ee13fc --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root 
+func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves response a reference against a context root and base path +func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) { + result := new(Response) + + err := resolveAnyWithBase(root, &ref, result, options) + if err != nil { + return nil, err + } + + return result, nil +} + +// ResolveResponse resolves response a reference against a context root +func ResolveResponse(root interface{}, ref Ref) (*Response, error) { + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolvePathItemWithBase resolves response a path item against a context root and base path +func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { + result := new(PathItem) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolvePathItem resolves response a path item against a context root and base path +// +// Deprecated: use ResolvePathItemWithBase instead +func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { + return ResolvePathItemWithBase(root, ref, options) +} + +// ResolveItemsWithBase resolves parameter items reference against a context root and base path. +// +// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref are forbidden in response headers. +func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { + result := new(Items) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveItems resolves parameter items reference against a context root and base path. +// +// Deprecated: use ResolveItemsWithBase instead +func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { + return ResolveItemsWithBase(root, ref, options) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/response.go b/test/tools/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 0000000000..0340b60d84 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. 
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` + }{ + Description: r.ResponseProps.Description, + Schema: r.ResponseProps.Schema, + Examples: r.ResponseProps.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/responses.go b/test/tools/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 0000000000..16c3076fe8 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,140 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. 
+// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/schema.go b/test/tools/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 0000000000..4e9be8576b --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,645 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
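A compact sketch of how the Response builders and the Responses container above fit together; status codes become string keys alongside "default" through ResponsesProps.MarshalJSON (RefSchema comes from schema.go, below). Not part of the vendored file:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        ok := spec.NewResponse().
            WithDescription("the resource").
            WithSchema(spec.RefSchema("#/definitions/Pet"))

        responses := spec.Responses{
            ResponsesProps: spec.ResponsesProps{
                StatusCodeResponses: map[int]spec.Response{200: *ok},
                Default:             spec.NewResponse().WithDescription("unexpected error"),
            },
        }

        // Produces {"200": {...}, "default": {...}} via the custom marshalers.
        b, err := json.Marshal(responses)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }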
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON 
unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+ return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// SetValidations defines all schema validations. +// +// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
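Pulling the schema constructors and fluent setters above together, a minimal object schema might be assembled like this (an illustrative sketch only, not part of the vendored file):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        pet := new(spec.Schema).
            Typed("object", "").
            WithTitle("Pet").
            WithRequired("name").
            SetProperty("name", *spec.StringProperty().WithMinLength(1)).
            SetProperty("tags", *spec.ArrayProperty(spec.StringProperty()).WithMaxItems(10)).
            SetProperty("age", *spec.Int32Property())

        // Schema.MarshalJSON merges SchemaProps, extensions and the swagger-only
        // fields into a single JSON document.
        b, err := json.MarshalIndent(pet, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }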
+func (s *Schema) SetValidations(val SchemaValidations) { + s.Maximum = val.Maximum + s.ExclusiveMaximum = val.ExclusiveMaximum + s.Minimum = val.Minimum + s.ExclusiveMinimum = val.ExclusiveMinimum + s.MaxLength = val.MaxLength + s.MinLength = val.MinLength + s.Pattern = val.Pattern + s.MaxItems = val.MaxItems + s.MinItems = val.MinItems + s.UniqueItems = val.UniqueItems + s.MultipleOf = val.MultipleOf + s.Enum = val.Enum + s.MinProperties = val.MinProperties + s.MaxProperties = val.MaxProperties + s.PatternProperties = val.PatternProperties +} + +// WithValidations is a fluent method to set schema validations +func (s *Schema) WithValidations(val SchemaValidations) *Schema { + s.SetValidations(val) + return s +} + +// Validations returns a clone of the validations for this schema +func (s Schema) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: CommonValidations{ + Maximum: s.Maximum, + ExclusiveMaximum: s.ExclusiveMaximum, + Minimum: s.Minimum, + ExclusiveMinimum: s.ExclusiveMinimum, + MaxLength: s.MaxLength, + MinLength: s.MinLength, + Pattern: s.Pattern, + MaxItems: s.MaxItems, + MinItems: s.MinItems, + UniqueItems: s.UniqueItems, + MultipleOf: s.MultipleOf, + Enum: s.Enum, + }, + MinProperties: s.MinProperties, + MaxProperties: s.MaxProperties, + PatternProperties: s.PatternProperties, + } +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go b/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 0000000000..b81175afdf --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,338 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return 
ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil { + return nil + } + + var ( + res interface{} + data interface{} + err error + ) + + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeRef(ref, basePath) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + var err error + pth := toFetch.String() + normalized := normalizeBase(pth) + debugLog("loading doc from: %s", normalized) + + unescaped, err := url.PathUnescape(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + u := url.URL{Path: unescaped} + + data, fromCache := r.cache.Get(u.RequestURI()) + if fromCache { + return data, toFetch, fromCache, nil + } + + b, err := r.context.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + var doc interface{} + if err := json.Unmarshal(b, &doc); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, doc) + + return doc, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target. +// +// Resolve is not in charge of following references: it only resolves ref by following its URL. +// +// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. +// +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. + expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/security_scheme.go b/test/tools/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 0000000000..9d0bdae908 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
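To see the loader plumbing above in action, the exported resolver entry points (e.g. ResolveRefWithBase from resolver.go, earlier in this file set) can resolve a fragment-only $ref against an in-memory root; ExpandOptions.PathLoader is the hook the schemaLoader consults when a $ref points at a remote document. A hedged sketch, passing nil options so the defaults shown above apply:

    package main

    import (
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        // An in-memory document standing in for a parsed swagger spec.
        root := map[string]interface{}{
            "definitions": map[string]interface{}{
                "Pet": map[string]interface{}{"type": "object"},
            },
        }

        ref := spec.MustCreateRef("#/definitions/Pet")

        // nil options: the default cache, the package-level PathLoader and a
        // pseudo base for the root document are used, as wired up by
        // defaultSchemaLoader above.
        sch, err := spec.ResolveRefWithBase(root, &ref, nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(sch.Type)
    }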
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // when not oauth2, empty AuthorizationURL should be omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/spec.go b/test/tools/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 0000000000..7d38b6e625 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json shema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := Asset("jsonschema-draft-04.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := Asset("v2/schema.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/swagger.go b/test/tools/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 0000000000..44722ffd5a --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,448 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + 
} + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/test/tools/vendor/github.com/go-openapi/spec/tag.go b/test/tools/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 0000000000..faa3d3de1e --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/test/tools/vendor/github.com/go-openapi/spec/url_go18.go similarity index 100% rename from vendor/github.com/go-openapi/spec/url_go18.go rename to test/tools/vendor/github.com/go-openapi/spec/url_go18.go diff --git a/test/tools/vendor/github.com/go-openapi/spec/url_go19.go b/test/tools/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 0000000000..392e3e6395 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/validations.go b/test/tools/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 0000000000..6360a8ea77 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,215 @@ +package spec + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored. 
+func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value interface{} +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, interface{})) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
+func (v CommonValidations) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: v, + } +} + +// HasNumberValidations indicates if the validations are for numbers or integers +func (v CommonValidations) HasNumberValidations() bool { + return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil +} + +// HasStringValidations indicates if the validations are for strings +func (v CommonValidations) HasStringValidations() bool { + return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" +} + +// HasArrayValidations indicates if the validations are for arrays +func (v CommonValidations) HasArrayValidations() bool { + return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems +} + +// HasEnum indicates if the validation includes some enum constraint +func (v CommonValidations) HasEnum() bool { + return len(v.Enum) > 0 +} + +// SchemaValidations describes the validation properties of a schema +// +// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change +// in the exported members: all initializers using litterals would fail. +type SchemaValidations struct { + CommonValidations + + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` +} + +// HasObjectValidations indicates if the validations are for objects +func (v SchemaValidations) HasObjectValidations() bool { + return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil +} + +// SetValidations for schema validations +func (v *SchemaValidations) SetValidations(val SchemaValidations) { + v.CommonValidations.SetValidations(val) + v.PatternProperties = val.PatternProperties + v.MaxProperties = val.MaxProperties + v.MinProperties = val.MinProperties +} + +// Validations for a schema +func (v SchemaValidations) Validations() SchemaValidations { + val := v.CommonValidations.Validations() + val.PatternProperties = v.PatternProperties + val.MinProperties = v.MinProperties + val.MaxProperties = v.MaxProperties + return val +} + +// ClearObjectValidations returns a clone of the validations with all object validations cleared. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxProperties != nil { + done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) + v.MaxProperties = nil + } + if v.MinProperties != nil { + done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) + v.MinProperties = nil + } + if v.PatternProperties != nil { + done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) + v.PatternProperties = nil + } +} diff --git a/test/tools/vendor/github.com/go-openapi/spec/xml_object.go b/test/tools/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 0000000000..945a46703d --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/.editorconfig b/test/tools/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/.gitattributes b/test/tools/vendor/github.com/go-openapi/strfmt/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/.gitignore b/test/tools/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 0000000000..dd91ed6a04 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml b/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 0000000000..be4899cb12 --- /dev/null +++ 
b/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,59 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 31 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable: + - revive + - goimports + - gosec + - unparam + - unconvert + - predeclared + - prealloc + - misspell + + # disable: + # - maligned + # - lll + # - gochecknoinits + # - gochecknoglobals + # - godox + # - gocognit + # - whitespace + # - wsl + # - funlen + # - wrapcheck + # - testpackage + # - nlreturn + # - gofumpt + # - goerr113 + # - gci + # - gomnd + # - godot + # - exhaustivestruct + # - paralleltest + # - varnamelen + # - ireturn + # - exhaustruct + # #- thelper + +issues: + exclude-rules: + - path: bson.go + text: "should be .*ObjectID" + linters: + - golint + - stylecheck + diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/test/tools/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/LICENSE b/test/tools/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/README.md b/test/tools/vendor/github.com/go-openapi/strfmt/README.md new file mode 100644 index 0000000000..0cf89d7766 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/README.md @@ -0,0 +1,88 @@ +# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) + +This package exposes a registry of data types to support string formats in the go-openapi toolkit. + +strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. + +## Supported data formats +go-openapi/strfmt follows the swagger 2.0 specification with the following formats +defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). + +It also provides convenient extensions to go-openapi users. + +- [x] JSON-schema draft 4 formats + - date-time + - email + - hostname + - ipv4 + - ipv6 + - uri +- [x] swagger 2.0 format extensions + - binary + - byte (e.g. base64 encoded string) + - date (e.g. "1970-01-01") + - password +- [x] go-openapi custom format extensions + - bsonobjectid (BSON objectID) + - creditcard + - duration (e.g. "3 weeks", "1ms") + - hexcolor (e.g. "#FFFFFF") + - isbn, isbn10, isbn13 + - mac (e.g "01:02:03:04:05:06") + - rgbcolor (e.g. "rgb(100,100,100)") + - ssn + - uuid, uuid3, uuid4, uuid5 + - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") + - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) + +> NOTE: as the name stands for, this package is intended to support string formatting only. +> It does not provide validation for numerical values with swagger format extension for JSON types "number" or +> "integer" (e.g. float, double, int32...). + +## Type conversion + +All types defined here are stringers and may be converted to strings with `.String()`. +Note that most types defined by this package may be converted directly to string like `string(Email{})`. + +`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. +Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` + +## Using pointers + +The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does +with primitive types. + +## Format types +Types defined in strfmt expose marshaling and validation capabilities. 
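A minimal usage sketch of the conversions and registry-based validation described above (the literal values below are made-up examples):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Format types are thin aliases over Go types and are all stringers.
	email := strfmt.Email("dev@example.com")
	fmt.Println(email.String(), string(email))

	// Date converts directly to time.Time.
	var d strfmt.Date
	_ = d.UnmarshalText([]byte("1970-01-01"))
	fmt.Println(time.Time(d).Year())

	// The default registry validates a raw string against a named format.
	fmt.Println(strfmt.Default.Validates("email", "dev@example.com")) // true
	fmt.Println(strfmt.IsDate("not-a-date"))                          // false
}
```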
+ +List of defined types: +- Base64 +- CreditCard +- Date +- DateTime +- Duration +- Email +- HexColor +- Hostname +- IPv4 +- IPv6 +- CIDR +- ISBN +- ISBN10 +- ISBN13 +- MAC +- ObjectId +- Password +- RGBColor +- SSN +- URI +- UUID +- UUID3 +- UUID4 +- UUID5 +- [ULID](https://github.com/ulid/spec) diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/bson.go b/test/tools/vendor/github.com/go-openapi/strfmt/bson.go new file mode 100644 index 0000000000..a8a3604a2c --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/bson.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + "database/sql/driver" + "fmt" + + "go.mongodb.org/mongo-driver/bson" + + "go.mongodb.org/mongo-driver/bson/bsontype" + bsonprim "go.mongodb.org/mongo-driver/bson/primitive" +) + +func init() { + var id ObjectId + // register this format in the default registry + Default.Add("bsonobjectid", &id, IsBSONObjectID) +} + +// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +func IsBSONObjectID(str string) bool { + _, err := bsonprim.ObjectIDFromHex(str) + return err == nil +} + +// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// +// swagger:strfmt bsonobjectid +type ObjectId bsonprim.ObjectID //nolint:revive + +// NewObjectId creates a ObjectId from a Hex String +func NewObjectId(hex string) ObjectId { //nolint:revive + oid, err := bsonprim.ObjectIDFromHex(hex) + if err != nil { + panic(err) + } + return ObjectId(oid) +} + +// MarshalText turns this instance into text +func (id ObjectId) MarshalText() ([]byte, error) { + oid := bsonprim.ObjectID(id) + if oid == bsonprim.NilObjectID { + return nil, nil + } + return []byte(oid.Hex()), nil +} + +// UnmarshalText hydrates this instance from text +func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on + if len(data) == 0 { + *id = ObjectId(bsonprim.NilObjectID) + return nil + } + oidstr := string(data) + oid, err := bsonprim.ObjectIDFromHex(oidstr) + if err != nil { + return err + } + *id = ObjectId(oid) + return nil +} + +// Scan read a value from a database driver +func (id *ObjectId) Scan(raw interface{}) error { + var data []byte + switch v := raw.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) + } + + return id.UnmarshalText(data) +} + +// Value converts a value to a database driver value +func (id ObjectId) Value() (driver.Value, error) { + return driver.Value(bsonprim.ObjectID(id).Hex()), nil +} + +func (id ObjectId) String() string { + return bsonprim.ObjectID(id).Hex() +} + +// MarshalJSON returns the ObjectId as JSON +func (id ObjectId) MarshalJSON() ([]byte, error) { + return bsonprim.ObjectID(id).MarshalJSON() +} + +// UnmarshalJSON sets the ObjectId from JSON +func (id *ObjectId) UnmarshalJSON(data []byte) error { + var obj bsonprim.ObjectID + if 
err := obj.UnmarshalJSON(data); err != nil { + return err + } + *id = ObjectId(obj) + return nil +} + +// MarshalBSON renders the object id as a BSON document +func (id ObjectId) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) +} + +// UnmarshalBSON reads the objectId from a BSON document +func (id *ObjectId) UnmarshalBSON(data []byte) error { + var obj struct { + Data bsonprim.ObjectID + } + if err := bson.Unmarshal(data, &obj); err != nil { + return err + } + *id = ObjectId(obj.Data) + return nil +} + +// MarshalBSONValue is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { + oid := bsonprim.ObjectID(id) + return bsontype.ObjectID, oid[:], nil +} + +// UnmarshalBSONValue is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { + var oid bsonprim.ObjectID + copy(oid[:], data) + *id = ObjectId(oid) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (id *ObjectId) DeepCopyInto(out *ObjectId) { + *out = *id +} + +// DeepCopy copies the receiver into a new ObjectId. +func (id *ObjectId) DeepCopy() *ObjectId { + if id == nil { + return nil + } + out := new(ObjectId) + id.DeepCopyInto(out) + return out +} diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/date.go b/test/tools/vendor/github.com/go-openapi/strfmt/date.go new file mode 100644 index 0000000000..3c93381c7c --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/date.go @@ -0,0 +1,187 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
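// Illustrative sketch of the ObjectId format implemented in bson.go above
// (the 24-character hex literal is an arbitrary example value).
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// IsBSONObjectID backs the "bsonobjectid" entry registered in init().
	fmt.Println(strfmt.IsBSONObjectID("507f1f77bcf86cd799439011")) // true

	// NewObjectId panics on invalid hex, so validate first for untrusted input.
	id := strfmt.NewObjectId("507f1f77bcf86cd799439011")
	fmt.Println(id.String())

	// ObjectId round-trips through JSON as a quoted hex string.
	out, _ := json.Marshal(id)
	var back strfmt.ObjectId
	_ = json.Unmarshal(out, &back)
	fmt.Println(back.String() == id.String()) // true
}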
+ +package strfmt + +import ( + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" +) + +func init() { + d := Date{} + // register this format in the default registry + Default.Add("date", &d, IsDate) +} + +// IsDate returns true when the string is a valid date +func IsDate(str string) bool { + _, err := time.Parse(RFC3339FullDate, str) + return err == nil +} + +const ( + // RFC3339FullDate represents a full-date as specified by RFC3339 + // See: http://goo.gl/xXOvVd + RFC3339FullDate = "2006-01-02" +) + +// Date represents a date from the API +// +// swagger:strfmt date +type Date time.Time + +// String converts this date into a string +func (d Date) String() string { + return time.Time(d).Format(RFC3339FullDate) +} + +// UnmarshalText parses a text representation into a date type +func (d *Date) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } + dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(dd) + return nil +} + +// MarshalText serializes this date type to string +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// Scan scans a Date value from database driver type. +func (d *Date) Scan(raw interface{}) error { + switch v := raw.(type) { + case []byte: + return d.UnmarshalText(v) + case string: + return d.UnmarshalText([]byte(v)) + case time.Time: + *d = Date(v) + return nil + case nil: + *d = Date{} + return nil + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) + } +} + +// Value converts Date to a primitive value ready to written to a database. +func (d Date) Value() (driver.Value, error) { + return driver.Value(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +func (d Date) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Date) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil + } + + return errors.New("couldn't unmarshal bson bytes value as Date") +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. +func (d *Date) DeepCopy() *Date { + if d == nil { + return nil + } + out := new(Date) + d.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (d Date) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (d *Date) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. 
+func (d Date) MarshalBinary() ([]byte, error) { + return time.Time(d).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Date) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *d = Date(original) + + return nil +} + +// Equal checks if two Date instances are equal +func (d Date) Equal(d2 Date) bool { + return time.Time(d).Equal(time.Time(d2)) +} diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/default.go b/test/tools/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 0000000000..a89a4de3f3 --- /dev/null +++ b/test/tools/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,2035 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + "database/sql/driver" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/mail" + "regexp" + "strings" + + "github.com/asaskevich/govalidator" + "go.mongodb.org/mongo-driver/bson" +) + +const ( + // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 + // A string instance is valid against this attribute if it is a valid + // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // http://tools.ietf.org/html/rfc1034#section-3.5 + // ::= any one of the ten digits 0 through 9 + // var digit = /[0-9]/; + // ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case + // var letter = /[a-zA-Z]/; + // ::= | + // var letDig = /[0-9a-zA-Z]/; + // ::= | "-" + // var letDigHyp = /[-0-9a-zA-Z]/; + // ::= | + // var ldhStr = /[-0-9a-zA-Z]+/; + //