diff --git a/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi8.yaml b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi8.yaml index 961b62a1..3ed3f063 100644 --- a/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi8.yaml +++ b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi8.yaml @@ -17,8 +17,8 @@ on: - '**/*.md' - .devfile.yaml - LICENSE - - '.rebase/*' - 'base/ubi9/**' + - 'universal/ubi9/**' env: USERSTORY: CloneGitRepoAPI diff --git a/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml new file mode 100644 index 00000000..7ef56b51 --- /dev/null +++ b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml @@ -0,0 +1,116 @@ +# +# Copyright (c) 2019-2024 Red Hat, Inc. +# This program and the accompanying materials are made +# available under the terms of the Eclipse Public License 2.0 +# which is available at https://www.eclipse.org/legal/epl-2.0/ +# +# SPDX-License-Identifier: EPL-2.0 +# +# Contributors: +# Red Hat, Inc. 
- initial API and implementation
+#

+name: Empty workspace smoke test on ubi9
+on:
+  pull_request:
+    paths-ignore:
+      - '**/*.md'
+      - .devfile.yaml
+      - LICENSE
+      - 'base/ubi8/**'
+      - 'universal/ubi8/**'
+
+env:
+  USERSTORY: CloneGitRepoAPI
+  TS_API_TEST_KUBERNETES_COMMAND_LINE_TOOL: kubectl
+  DEPLOYMENT_TIMEOUT: 90s
+  PULL_POLICY: IfNotPresent
+
+jobs:
+  workspace-api-tests-on-minikube:
+    runs-on: ubuntu-22.04
+    steps:
+
+      - name: Checkout
+        uses: actions/checkout@master
+      - name: Free runner space
+        run: |
+          sudo rm -rf /usr/local/lib/android
+      # obtain the PR number for tagging the image
+      - name: Get PR number
+        id: get_pr_number
+        run: |
+          pr_number=$(echo $GITHUB_REF | awk 'BEGIN { FS = "/" } ; { print $3 }')
+          echo "PR_NUMBER=$pr_number" >> $GITHUB_ENV
+          echo ">>>>>>>>>>>$pr_number"
+
+      - name: Cleanup built-in images
+        run: |
+          # remove built-in images from the VM because they are not used
+          docker rmi -f $(docker images -aq)
+
+      - name: Start minikube cluster
+        id: run-minikube
+        uses: che-incubator/setup-minikube-action@next
+        with:
+          minikube-version: v1.31.0
+
+      # connect with docker daemon in the minikube and build an image there
+      # we need to build the image in the minikube because we have just 14 GB of space on the runner
+      # the UBI image is more than 9 GB in size; this approach saves disk space
+      - name: Build base image
+        run: |
+          eval $(minikube docker-env)
+          cd base/ubi9 && docker build -t quay.io/devfile/base-developer-image:ubi9-latest .
+
+      - name: Build universal image
+        run: |
+          eval $(minikube docker-env)
+          cd universal/ubi9 && docker build -t quay.io/devfile/universal-developer-image:${{ env.PR_NUMBER }} .
+ + - name: Checkout DWO + uses: actions/checkout@master + with: + repository: devfile/devworkspace-operator + path: devworkspace-operator + + - name: Setup cert manager + run: | + cd devworkspace-operator + make install_cert_manager + kubectl wait deployment -n cert-manager cert-manager --for condition=Available=True --timeout=$DEPLOYMENT_TIMEOUT + kubectl wait deployment -n cert-manager cert-manager-cainjector --for condition=Available=True --timeout=$DEPLOYMENT_TIMEOUT + kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=$DEPLOYMENT_TIMEOUT + + - name: Setup DWO + run: | + cd devworkspace-operator + make install + kubectl rollout status deployment -n devworkspace-controller devworkspace-controller-manager --timeout=$DEPLOYMENT_TIMEOUT + kubectl rollout status deployment -n devworkspace-controller devworkspace-webhook-server --timeout=$DEPLOYMENT_TIMEOUT + kubectl wait deployment -n devworkspace-controller devworkspace-webhook-server --for condition=Available=True --timeout=$DEPLOYMENT_TIMEOUT + kubectl wait deployment -n devworkspace-controller devworkspace-controller-manager --for condition=Available=True --timeout=$DEPLOYMENT_TIMEOUT + + - name: Check that UDI is presen in the image list + run: | + # we used it for the build above and do not need it anymore. 
It saves the disk space + minikube image rm quay.io/devfile/base-developer-image:ubi9-latest + minikube image list --format table + + - name: Install NodeJs + uses: actions/setup-node@v4 + + - name: Checkout tests codebase + uses: actions/checkout@master + with: + ref: api-test-with-clone-project-without-generating + repository: eclipse/che + path: che + + - name: Run Empty workspace smoke test + run: | + export TS_API_TEST_UDI_IMAGE=quay.io/devfile/universal-developer-image:${{ env.PR_NUMBER }} + cd che/tests/e2e + npm i + npm run driver-less-test + diff --git a/README.md b/README.md index 9d597fa0..b2dafa3a 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ $ docker run -ti --rm \ ``` ### Included Development Tools -| Tool | ubi8 based image | +| Tool | ubi9 based image | |---------------------|-------------------------------------| | `bash` |`bash` | | `bat` |`` | @@ -96,7 +96,7 @@ docker run -ti --rm \ ``` ### Included Development Tools -| Tool or language | ubi8 based image | +| Tool or language | ubi9 based image | |---------------------|-------------------------------------| |--------JAVA---------|-------------------------------------| | `sdk` |`` | @@ -106,7 +106,7 @@ docker run -ti --rm \ | `java` |`<21.0.2-tem via sdkman>` | | `maven` |`` | | `gradle` |`` | -| `mandrel` |`<22.1.0.0.r17-mandrel via sdkman>` | +| `mandrel` |`<22.1.2.r21-mandrel via sdkman>` | | `jbang` |`` | |--------SCALA--------|-------------------------------------| | `cs` |`` | diff --git a/base/ubi9/.stow-local-ignore b/base/ubi9/.stow-local-ignore new file mode 100644 index 00000000..e4a7df98 --- /dev/null +++ b/base/ubi9/.stow-local-ignore @@ -0,0 +1,12 @@ +# .viminfo cannot be a symlink for security reasons +\.viminfo + +# We store bash related files in /home/tooling/ so they aren't overriden if persistUserHome is enabled +# but we don't want them to be symbolic links (or to cause stow conflicts). They will be copied to /home/user/ manually. 
+\.bashrc +\.bash_profile + +# Ignore absolute symbolic links, as they are not supported by stow +\.krew +\.sdkman +\.local/bin/podman diff --git a/base/ubi9/Dockerfile b/base/ubi9/Dockerfile index 8e1533b3..11f7f022 100644 --- a/base/ubi9/Dockerfile +++ b/base/ubi9/Dockerfile @@ -19,17 +19,26 @@ LABEL io.openshift.expose-services="" USER 0 -# Removed because of vulnerabilities: git-lfs -RUN dnf install -y diffutils git iproute jq less lsof man nano procps \ - perl-Digest-SHA net-tools openssh-clients rsync socat sudo time vim wget zip && \ +ENV HOME=/home/tooling +RUN mkdir -p /home/tooling/ + +## add epel repos so that p7zip p7zip-plugins stow can be found +RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \ + dnf install -y diffutils git git-lfs iproute jq less lsof man nano procps p7zip p7zip-plugins \ + perl-Digest-SHA net-tools openssh-clients rsync socat sudo time vim wget zip stow && \ dnf update -y && \ dnf clean all +## podman buildah skopeo +RUN dnf -y reinstall shadow-utils && \ + dnf -y install podman buildah skopeo fuse-overlayfs && \ + dnf clean all + ## gh-cli RUN \ TEMP_DIR="$(mktemp -d)"; \ cd "${TEMP_DIR}"; \ - GH_VERSION="2.23.0"; \ + GH_VERSION="2.45.0"; \ GH_ARCH="linux_amd64"; \ GH_TGZ="gh_${GH_VERSION}_${GH_ARCH}.tar.gz"; \ GH_TGZ_URL="https://github.com/cli/cli/releases/download/v${GH_VERSION}/${GH_TGZ}"; \ @@ -88,16 +97,69 @@ RUN \ cd - && \ rm -rf "${TEMP_DIR}" + # Define user directory for binaries +ENV PATH="/home/user/.local/bin:$PATH" + +# Set up environment variables to note that this is +# not starting with usernamespace and default to +# isolate the filesystem with chroot. 
+ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot + +# Tweaks to make rootless buildah work +RUN touch /etc/subgid /etc/subuid && \ + chmod g=u /etc/subgid /etc/subuid /etc/passwd && \ + echo user:10000:65536 > /etc/subuid && \ + echo user:10000:65536 > /etc/subgid + +# Adjust storage.conf to enable Fuse storage. +RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /etc/containers/storage.conf +RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; \ + touch /var/lib/shared/overlay-images/images.lock; \ + touch /var/lib/shared/overlay-layers/layers.lock + +# But use VFS since not all environments support overlay with Fuse backend +RUN mkdir -p "${HOME}"/.config/containers && \ + (echo '[storage]';echo 'driver = "vfs"') > "${HOME}"/.config/containers/storage.conf && \ + chown -R 10001 "${HOME}"/.config + +# Add kubedock +ENV KUBEDOCK_VERSION 0.17.0 +ENV KUBECONFIG=/home/user/.kube/config +RUN curl -L https://github.com/joyrex2001/kubedock/releases/download/${KUBEDOCK_VERSION}/kubedock_${KUBEDOCK_VERSION}_linux_amd64.tar.gz | tar -C /usr/local/bin -xz --no-same-owner \ + && chmod +x /usr/local/bin/kubedock +COPY --chown=0:0 kubedock_setup.sh /usr/local/bin/kubedock_setup + +# Configure Podman wrapper +ENV PODMAN_WRAPPER_PATH=/usr/bin/podman.wrapper +ENV ORIGINAL_PODMAN_PATH=/usr/bin/podman.orig +COPY --chown=0:0 podman-wrapper.sh "${PODMAN_WRAPPER_PATH}" +RUN mv /usr/bin/podman "${ORIGINAL_PODMAN_PATH}" + COPY --chown=0:0 entrypoint.sh / +COPY --chown=0:0 .stow-local-ignore /home/tooling/ RUN \ # add user and configure it useradd -u 10001 -G wheel,root -d /home/user --shell /bin/bash -m user && \ # Setup $PS1 for a consistent and reasonable prompt - echo "export PS1='\W \`git branch --show-current 2>/dev/null | sed -r -e \"s@^(.+)@\(\1\) @\"\`$ '" >> /home/user/.bashrc && \ + touch /etc/profile.d/udi_prompt.sh && \ + chown 10001 /etc/profile.d/udi_prompt.sh && \ + echo "export PS1='\W \`git 
branch --show-current 2>/dev/null | sed -r -e \"s@^(.+)@\(\1\) @\"\`$ '" >> /etc/profile.d/udi_prompt.sh && \ + # Copy the global git configuration to user config as global /etc/gitconfig + # file may be overwritten by a mounted file at runtime + cp /etc/gitconfig ${HOME}/.gitconfig && \ + chown 10001 ${HOME}/ ${HOME}/.viminfo ${HOME}/.gitconfig ${HOME}/.stow-local-ignore && \ # Set permissions on /etc/passwd and /home to allow arbitrary users to write chgrp -R 0 /home && \ chmod -R g=u /etc/passwd /etc/group /home && \ - chmod +x /entrypoint.sh + chmod +x /entrypoint.sh && \ + # Create symbolic links from /home/tooling/ -> /home/user/ + stow . -t /home/user/ -d /home/tooling/ && \ + # .viminfo cannot be a symbolic link for security reasons, so copy it to /home/user/ + cp /home/tooling/.viminfo /home/user/.viminfo && \ + # Bash-related files are backed up to /home/tooling/ incase they are deleted when persistUserHome is enabled. + cp /home/user/.bashrc /home/tooling/.bashrc && \ + cp /home/user/.bash_profile /home/tooling/.bash_profile && \ + chown 10001 /home/tooling/.bashrc /home/tooling/.bash_profile USER 10001 ENV HOME=/home/user diff --git a/base/ubi9/entrypoint.sh b/base/ubi9/entrypoint.sh index c3468c85..aec8312f 100644 --- a/base/ubi9/entrypoint.sh +++ b/base/ubi9/entrypoint.sh @@ -18,4 +18,6 @@ if ! whoami &> /dev/null; then fi fi +source kubedock_setup + exec "$@" diff --git a/base/ubi9/kubedock_setup.sh b/base/ubi9/kubedock_setup.sh new file mode 100755 index 00000000..2c8400f8 --- /dev/null +++ b/base/ubi9/kubedock_setup.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Kubedock setup script meant to be run from the entrypoint script. 
+ +LOCAL_BIN=/home/user/.local/bin +ORIGINAL_PODMAN_PATH=${ORIGINAL_PODMAN_PATH:-"/usr/bin/podman.orig"} +PODMAN_WRAPPER_PATH=${PODMAN_WRAPPER_PATH:-"/usr/bin/podman.wrapper"} + +mkdir -p "${LOCAL_BIN}" + +if [ "${KUBEDOCK_ENABLED:-false}" = "true" ]; then + echo + echo "Kubedock is enabled (env variable KUBEDOCK_ENABLED is set to true)." + + SECONDS=0 + KUBEDOCK_TIMEOUT=${KUBEDOCK_TIMEOUT:-10} + until [ -f $KUBECONFIG ]; do + if ((SECONDS > KUBEDOCK_TIMEOUT)); then + break + fi + echo "Kubeconfig doesn't exist yet. Waiting..." + sleep 1 + done + + if [ -f $KUBECONFIG ]; then + echo "Kubeconfig found." + + KUBEDOCK_PARAMS=${KUBEDOCK_PARAMS:-"--reverse-proxy --kubeconfig $KUBECONFIG"} + + echo "Starting kubedock with params \"${KUBEDOCK_PARAMS}\"..." + + kubedock server ${KUBEDOCK_PARAMS} >/tmp/kubedock.log 2>&1 & + + echo "Done." + + echo "Replacing podman with podman-wrapper..." + + ln -f -s "${PODMAN_WRAPPER_PATH}" "${LOCAL_BIN}/podman" + + export TESTCONTAINERS_RYUK_DISABLED="true" + export TESTCONTAINERS_CHECKS_DISABLE="true" + + echo "Done." + echo + else + echo "Could not find Kubeconfig at $KUBECONFIG" + echo "Giving up..." + fi +else + echo + echo "Kubedock is disabled. It can be enabled with the env variable \"KUBEDOCK_ENABLED=true\"" + echo "set in the workspace Devfile or in a Kubernetes ConfigMap in the developer namespace." 
+ echo + ln -f -s "${ORIGINAL_PODMAN_PATH}" "${LOCAL_BIN}/podman" +fi diff --git a/base/ubi9/podman-wrapper.sh b/base/ubi9/podman-wrapper.sh new file mode 100755 index 00000000..b7f7fbc9 --- /dev/null +++ b/base/ubi9/podman-wrapper.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -euo pipefail + +PODMAN_ORIGINAL_PATH=${PODMAN_ORIGINAL_PATH:-"/usr/bin/podman.orig"} +KUBEDOCK_SUPPORTED_COMMANDS=${KUBEDOCK_SUPPORTED_COMMANDS:-"run ps exec cp logs inspect kill rm wait stop start"} + +PODMAN_ARGS=( "$@" ) + +TRUE=0 +FALSE=1 + +exec_original_podman() { + exec ${PODMAN_ORIGINAL_PATH} "${PODMAN_ARGS[@]}" +} + +exec_kubedock_podman() { + exec env CONTAINER_HOST=tcp://127.0.0.1:2475 "${PODMAN_ORIGINAL_PATH}" "${PODMAN_ARGS[@]}" +} + +podman_command() { + echo "${PODMAN_ARGS[0]}" +} + +command_is_supported_by_kubedock() { + CMD=$(podman_command) + for SUPPORTED_CMD in $KUBEDOCK_SUPPORTED_COMMANDS; do + if [ "$SUPPORTED_CMD" = "$CMD" ]; then + return $TRUE + fi + done + return ${FALSE} +} + +if command_is_supported_by_kubedock; then + exec_kubedock_podman +else + exec_original_podman +fi diff --git a/universal/ubi9/Dockerfile b/universal/ubi9/Dockerfile new file mode 100644 index 00000000..29bfba60 --- /dev/null +++ b/universal/ubi9/Dockerfile @@ -0,0 +1,430 @@ +# syntax=docker/dockerfile:1.3-labs + +# updateBaseImages.sh can't operate on SHA-based tags as they're not date-based or semver-sequential, and therefore cannot be ordered +FROM quay.io/devfile/base-developer-image:ubi9-latest +LABEL maintainer="Red Hat, Inc." + +LABEL com.redhat.component="devfile-universal-container" +LABEL name="devfile/universal-developer-image" +LABEL version="ubi9" + +#label for EULA +LABEL com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" + +#labels for container catalog +LABEL summary="devfile universal developer image" +LABEL description="Image with developers tools. Languages SDK and runtimes included." 
+LABEL io.k8s.display-name="devfile-developer-universal"
+LABEL io.openshift.expose-services=""
+
+USER 0
+
+# $PROFILE_EXT contains all additions made to the bash environment
+ENV PROFILE_EXT=/etc/profile.d/udi_environment.sh
+RUN touch ${PROFILE_EXT} && chown 10001 ${PROFILE_EXT}
+
+USER 10001
+
+# We install everything to /home/tooling/ as /home/user/ may get overridden, see github.com/eclipse/che/issues/22412
+ENV HOME=/home/tooling
+
+# Java
+RUN curl -fsSL "https://get.sdkman.io/?rcupdate=false" | bash \
+    && bash -c ". /home/tooling/.sdkman/bin/sdkman-init.sh \
+    && sed -i "s/sdkman_auto_answer=false/sdkman_auto_answer=true/g" /home/tooling/.sdkman/etc/config \
+    && sed -i "s/sdkman_auto_env=false/sdkman_auto_env=true/g" /home/tooling/.sdkman/etc/config \
+    && sdk install java 8.0.402-tem \
+    && sdk install java 11.0.22-tem \
+    && sdk install java 17.0.10-tem \
+    && sdk install java 21.0.2-tem \
+    && sdk install java 23.1.2.r21-mandrel \
+    && sdk default java 17.0.10-tem \
+    && sdk install gradle \
+    && sdk install maven \
+    && sdk install jbang \
+    && sdk flush archives \
+    && sdk flush temp" \
+    && chgrp -R 0 /home/tooling && chmod -R g=u /home/tooling
+
+# sdk home java
+ENV JAVA_HOME_8=/home/tooling/.sdkman/candidates/java/8.0.402-tem
+ENV JAVA_HOME_11=/home/tooling/.sdkman/candidates/java/11.0.22-tem
+ENV JAVA_HOME_17=/home/tooling/.sdkman/candidates/java/17.0.10-tem
+ENV JAVA_HOME_21=/home/tooling/.sdkman/candidates/java/21.0.2-tem
+
+# Java-related environment variables are described and set by ${PROFILE_EXT}, which will be loaded by ~/.bashrc
+# To make Java work for dash and other shells, it needs to be initialized in the Dockerfile.
+ENV SDKMAN_CANDIDATES_API="https://api.sdkman.io/2" +ENV SDKMAN_CANDIDATES_DIR="/home/tooling/.sdkman/candidates" +ENV SDKMAN_DIR="/home/tooling/.sdkman" +ENV SDKMAN_PLATFORM="linuxx64" +ENV SDKMAN_VERSION="5.18.2" + +ENV GRADLE_HOME="/home/tooling/.sdkman/candidates/gradle/current" +ENV JAVA_HOME="/home/tooling/.sdkman/candidates/java/current" +ENV MAVEN_HOME="/home/tooling/.sdkman/candidates/maven/current" + +ENV GRAALVM_HOME=/home/tooling/.sdkman/candidates/java/23.1.2.r21-mandrel + +ENV PATH="/home/tooling/.krew/bin:$PATH" +ENV PATH="/home/tooling/.sdkman/candidates/maven/current/bin:$PATH" +ENV PATH="/home/tooling/.sdkman/candidates/java/current/bin:$PATH" +ENV PATH="/home/tooling/.sdkman/candidates/gradle/current/bin:$PATH" +ENV PATH="/home/tooling/.local/share/coursier/bin:$PATH" + +# NodeJS +RUN mkdir -p /home/tooling/.nvm/ +ENV NVM_DIR="/home/tooling/.nvm" +ENV NODEJS_20_VERSION=20.18.0 +ENV NODEJS_18_VERSION=18.19.1 +ENV NODEJS_DEFAULT_VERSION=${NODEJS_18_VERSION} +RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | PROFILE=/dev/null bash +RUN echo 'export NVM_DIR="$HOME/.nvm"' >> ${PROFILE_EXT} \ + && echo '[ -s "$NVM_DIR/nvm.sh" ] && \. 
"$NVM_DIR/nvm.sh"' >> ${PROFILE_EXT} +RUN source /home/user/.bashrc && \ + nvm install v${NODEJS_20_VERSION} && \ + nvm install v${NODEJS_18_VERSION} && \ + nvm alias default v${NODEJS_DEFAULT_VERSION} && nvm use v${NODEJS_DEFAULT_VERSION} && \ + npm install --global yarn@v1.22.17 &&\ + chgrp -R 0 /home/tooling && chmod -R g=u /home/tooling +ENV PATH=$NVM_DIR/versions/node/v${NODEJS_DEFAULT_VERSION}/bin:$PATH +ENV NODEJS_HOME_20=$NVM_DIR/versions/node/v${NODEJS_20_VERSION} +ENV NODEJS_HOME_18=$NVM_DIR/versions/node/v${NODEJS_18_VERSION} + +# kube +ENV KUBECONFIG=/home/user/.kube/config + +USER 0 + +# Required packages for AWT +RUN dnf install -y libXext libXrender libXtst libXi + +# Lombok +ENV LOMBOK_VERSION=1.18.18 +RUN wget -O /usr/local/lib/lombok.jar https://projectlombok.org/downloads/lombok-${LOMBOK_VERSION}.jar + +# Scala +RUN curl -fLo cs https://git.io/coursier-cli && \ + chmod +x cs && \ + mv cs /usr/local/bin/ +RUN curl -fLo sbt https://raw.githubusercontent.com/dwijnand/sbt-extras/master/sbt && \ + chmod +x sbt && \ + mv sbt /usr/local/bin/ +RUN curl -fLo mill https://raw.githubusercontent.com/lefou/millw/main/millw && \ + chmod +x mill && \ + mv mill /usr/local/bin/ + +# C/CPP +RUN dnf -y install llvm-toolset gcc gcc-c++ clang clang-libs clang-tools-extra gdb + +# Go 1.18+ - installed to /usr/bin/go +# gopls 0.10+ - installed to /home/tooling/go/bin/gopls and /home/tooling/go/pkg/mod/ +RUN dnf install -y go-toolset && \ + GO111MODULE=on go install -v golang.org/x/tools/gopls@latest && \ + chgrp -R 0 /home/tooling && chmod -R g=u /home/tooling +ENV GOBIN="/home/tooling/go/bin/" +ENV PATH="$GOBIN:$PATH" + +# Python +RUN dnf -y install python3.11 python3.11-devel python3.11-setuptools python3.11-pip nss_wrapper + +RUN cd /usr/bin \ + && if [ ! -L python ]; then ln -s python3.11 python; fi \ + && if [ ! -L pydoc ]; then ln -s pydoc3.11 pydoc; fi \ + && if [ ! -L python-config ]; then ln -s python3.11-config python-config; fi \ + && if [ ! 
-L pip ]; then ln -s pip-3.11 pip; fi + +RUN pip install pylint yq + +# PHP +ENV PHP_VERSION=8.2 +RUN dnf -y module enable php:$PHP_VERSION && \ + dnf install -y --setopt=tsflags=nodocs php php-mysqlnd php-pgsql php-bcmath \ + php-gd php-intl php-json php-ldap php-mbstring php-pdo \ + php-pear php-zlib php-mysqli php-curl php-xml php-devel\ + php-process php-soap php-opcache php-fpm ca-certificates \ + php-gmp php-pecl-xdebug php-pecl-zip mod_ssl hostname && \ + wget https://getcomposer.org/installer -O /tmp/composer-installer.php && \ + php /tmp/composer-installer.php --filename=composer --install-dir=/usr/local/bin + +ENV PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear \ + PHP_SYSCONF_PATH=/etc \ + PHP_HTTPD_CONF_FILE=php.conf \ + HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ + HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d \ + HTTPD_VAR_RUN=/var/run/httpd \ + HTTPD_DATA_PATH=/var/www \ + HTTPD_DATA_ORIG_PATH=/var/www \ + HTTPD_VAR_PATH=/var + +# .NET +ENV DOTNET_RPM_VERSION=8.0 +RUN dnf install -y dotnet-hostfxr-${DOTNET_RPM_VERSION} dotnet-runtime-${DOTNET_RPM_VERSION} dotnet-sdk-${DOTNET_RPM_VERSION} + +# rust +ENV CARGO_HOME=/home/tooling/.cargo \ + RUSTUP_HOME=/home/tooling/.rustup \ + PATH=/home/tooling/.cargo/bin:${PATH} +RUN curl --proto '=https' --tlsv1.2 -sSfo rustup https://sh.rustup.rs && \ + chmod +x rustup && \ + mv rustup /usr/bin/ && \ + rustup -y --no-modify-path --profile minimal -c rust-src -c rust-analysis -c rls && \ + chgrp -R 0 /home/tooling && chmod -R g=u /home/tooling + +# camel-k +ENV KAMEL_VERSION 2.2.0 +RUN curl -L https://github.com/apache/camel-k/releases/download/v${KAMEL_VERSION}/camel-k-client-${KAMEL_VERSION}-linux-amd64.tar.gz | tar -C /usr/local/bin -xz --no-same-owner \ + && chmod +x /usr/local/bin/kamel + +# Config directories +RUN mkdir -p /home/tooling/.m2 && \ + mkdir -p /home/tooling/.gradle && \ + mkdir -p /home/tooling/.config/pip && \ + mkdir -p /home/tooling/.sbt/1.0 && \ + mkdir 
-p /home/tooling/.cargo && \ + mkdir -p /home/tooling/certs && \ + mkdir -p /home/tooling/.composer && \ + mkdir -p /home/tooling/.nuget && \ + chgrp -R 0 /home/tooling && chmod -R g=u /home/tooling + +# Cloud + +# oc client +ENV OC_VERSION=4.15 +RUN curl -L https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/stable-${OC_VERSION}/openshift-client-linux.tar.gz | tar -C /usr/local/bin -xz --no-same-owner \ + && chmod +x /usr/local/bin/oc + +# OS Pipelines CLI (tkn) +ENV TKN_VERSION=1.14.0 +RUN curl -L https://mirror.openshift.com/pub/openshift-v4/clients/pipelines/${TKN_VERSION}/tkn-linux-amd64.tar.gz | tar -C /usr/local/bin -xz --no-same-owner \ + && chmod +x /usr/local/bin/tkn /usr/local/bin/opc /usr/local/bin/tkn-pac + +RUN echo 'alias docker=podman' >> ${PROFILE_EXT} + +# Configure container engine +COPY --chown=0:0 containers.conf /etc/containers/containers.conf + +ENV K8S_VERSION=1.28 +## kubectl +RUN < /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=https://pkgs.k8s.io/core:/stable:/v${K8S_VERSION}/rpm/ +enabled=1 +gpgcheck=1 +gpgkey=https://pkgs.k8s.io/core:/stable:/v${K8S_VERSION}/rpm/repodata/repomd.xml.key +EOF2 + +dnf install -y kubectl +curl -sSL -o ~/.kubectl_aliases https://raw.githubusercontent.com/ahmetb/kubectl-alias/master/.kubectl_aliases +echo '[ -f ~/.kubectl_aliases ] && source ~/.kubectl_aliases' >> ${PROFILE_EXT} +EOF + +## shellcheck +RUN < "${KREW_TGZ}.sha256" + +sha256sum -c "${KREW_TGZ}.sha256" 2>&1 | grep OK + +tar -zxv --no-same-owner -f "${KREW_TGZ}" +./"krew-${KREW_ARCH}" install krew +echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >> ${PROFILE_EXT} +export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" +# kubens and kubectx +kubectl krew install ns +kubectl krew install ctx +cd - +rm -rf "${TEMP_DIR}" +EOF + +## helm +RUN <&1 | grep OK +tar -zxv --no-same-owner -f "${HELM_TGZ}" +mv "${HELM_ARCH}"/helm /usr/local/bin/helm +cd - +rm -rf "${TEMP_DIR}" +EOF + +## kustomize +RUN <&1 | grep OK 
+tar -zxv --no-same-owner -f "${KUSTOMIZE_TGZ}" +mv kustomize /usr/local/bin/ +cd - +rm -rf "${TEMP_DIR}" +EOF + +## tektoncd-cli +RUN <&1 | grep OK +tar -zxv --no-same-owner -f "${TKN_TGZ}" +mv tkn /usr/local/bin/ +cd - +rm -rf "${TEMP_DIR}" +EOF + +## knative-cli +RUN <&1 | grep OK +mv "${KN_BIN}" kn +chmod +x kn +mv kn /usr/local/bin +cd - +rm -rf "${TEMP_DIR}" +EOF + +## terraform-cli +RUN <&1 | grep OK +unzip ${TF_ZIP} +chmod +x terraform +mv terraform /usr/local/bin +cd - +rm -rf "${TEMP_DIR}" +EOF + +## skaffold +RUN curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 && \ + install skaffold /usr/local/bin/ + +# e2fsprogs setup +# Since e2fsprogs-static package has removed RHEL 8 distribution, it is not possible to install from the repository +# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/considerations_in_adopting_rhel_8/index#removed-packages_changes-to-packages +RUN <&1 | grep OK +tar -zxv --no-same-owner -f "${E2FSPROGS_TGZ}" +cd "e2fsprogs-${E2FSPROGS_VERSION}" +mkdir build +cd build +../configure --prefix=/usr --with-root-prefix="" --enable-elf-shlibs --disable-evms +make +make install +make install-libs +cd - +rm -rf "${TEMP_DIR}" +EOF + +# Bash completions +RUN dnf -y install bash-completion \ + && dnf clean all \ + && rm -rf /var/cache/yum + +RUN < /usr/share/bash-completion/completions/oc +tkn completion bash > /usr/share/bash-completion/completions/tkn +kubectl completion bash > /usr/share/bash-completion/completions/kubectl +cat ${NVM_DIR}/bash_completion > /usr/share/bash-completion/completions/nvm +EOF + +## Add sdkman's init script launcher to the end of ${PROFILE_EXT} since we are not adding it on sdkman install +## NOTE: all modifications to ${PROFILE_EXT} must happen BEFORE this step in order for sdkman to function correctly +RUN echo 'export SDKMAN_DIR="/home/tooling/.sdkman"' >> ${PROFILE_EXT} +RUN echo '[[ -s "$SDKMAN_DIR/bin/sdkman-init.sh" ]] && source 
"$SDKMAN_DIR/bin/sdkman-init.sh"' >> ${PROFILE_EXT} + + +# Create symbolic links from /home/tooling/ -> /home/user/ +RUN stow . -t /home/user/ -d /home/tooling/ --no-folding + +# Set permissions on /etc/passwd, /etc/group, /etc/pki and /home to allow arbitrary users to write +RUN chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home /etc/pki + +# cleanup dnf cache +RUN dnf -y clean all --enablerepo='*' + +COPY --chown=0:0 entrypoint.sh / + +USER 10001 + +ENV HOME=/home/user diff --git a/universal/ubi9/containers.conf b/universal/ubi9/containers.conf new file mode 100644 index 00000000..f6ad1381 --- /dev/null +++ b/universal/ubi9/containers.conf @@ -0,0 +1,4 @@ +[containers] +default_ulimits = [ + "nofile=65535:65535", +] diff --git a/universal/ubi9/entrypoint.sh b/universal/ubi9/entrypoint.sh new file mode 100755 index 00000000..aeb28436 --- /dev/null +++ b/universal/ubi9/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +source kubedock_setup + +# Stow +## Required for https://github.com/eclipse/che/issues/22412 + +# /home/user/ will be mounted to by a PVC if persistUserHome is enabled +mountpoint -q /home/user/; HOME_USER_MOUNTED=$? + +# This file will be created after stowing, to guard from executing stow everytime the container is started +STOW_COMPLETE=/home/user/.stow_completed + +if [ $HOME_USER_MOUNTED -eq 0 ] && [ ! -f $STOW_COMPLETE ]; then + # Create symbolic links from /home/tooling/ -> /home/user/ + stow . 
-t /home/user/ -d /home/tooling/ --no-folding -v 2 > /tmp/stow.log 2>&1 + # Vim does not permit .viminfo to be a symbolic link for security reasons, so manually copy it + cp /home/tooling/.viminfo /home/user/.viminfo + # We have to restore bash-related files back onto /home/user/ (since they will have been overwritten by the PVC) + # but we don't want them to be symbolic links (so that they persist on the PVC) + cp /home/tooling/.bashrc /home/user/.bashrc + cp /home/tooling/.bash_profile /home/user/.bash_profile + touch $STOW_COMPLETE +fi + +exec "$@"