diff --git a/charts/README.md b/charts/README.md
index a603bb2..b9b24f7 100644
--- a/charts/README.md
+++ b/charts/README.md
@@ -7,6 +7,7 @@
 * [Let's Encrypt](letsencrypt/README.md)
 * [NFS-Ganesha](nfs-ganesha/README.md)
 * [NFS Server Provisioner](nfs-server-provisioner/README.md)
+* [Portworx BBQ](portworx-bbq/README.md)
 * [Rancher Cluster Template](rancher-cluster-templates/README.md)
 * [WordPress](wordpress/README.md)
 
diff --git a/charts/ds389/Chart.yaml b/charts/ds389/Chart.yaml
new file mode 100644
index 0000000..83e34cc
--- /dev/null
+++ b/charts/ds389/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+name: ds389
+description: A Helm chart for deploying the 389 Directory Server
+version: 1.0.0
+appVersion: "1.0"
+maintainers:
+  - name: rmahique
+    email: raul.mahiques@suse.com
+keywords:
+  - ldap
+  - '389'
+  - '636'
diff --git a/charts/ds389/templates/secrets.yaml b/charts/ds389/templates/secrets.yaml
new file mode 100644
index 0000000..7bb1002
--- /dev/null
+++ b/charts/ds389/templates/secrets.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: dirsrv-tls-secret
+#  namespace: {{ .Values.ds389.nsName }}
+data:
+  tls.key: {{ .Values.ds389.tlsKey | quote }}
+  tls.crt: {{ .Values.ds389.tlsCert | quote }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: dirsrv-dm-password
+#  namespace: {{ .Values.ds389.nsName }}
+data:
+  dm-password: {{ .Values.ds389.dmPassword | quote }}
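+# NOTE: the default tlsKey, tlsCert and dmPassword in values.yaml are already
+# base64-encoded, so they are placed in `data` without re-encoding; override
+# them with base64-encoded values as well.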
diff --git a/charts/ds389/templates/service-external.yaml b/charts/ds389/templates/service-external.yaml
new file mode 100644
index 0000000..ff09ae6
--- /dev/null
+++ b/charts/ds389/templates/service-external.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: {{ .Values.ds389.name }}
+  name: {{ .Values.ds389.name }}-external-svc
+#  namespace: {{ .Values.ds389.nsName }}
+spec:
+  ports:
+    - name: {{ .Values.ds389.name }}-port
+      port: {{ .Values.ds389.internalPort }}
+      protocol: TCP
+      targetPort: {{ .Values.ds389.internalPort }}
+      nodePort: {{ .Values.ds389.nodePort }}
+    - name: {{ .Values.ds389.name }}-tls-port
+      port: {{ .Values.ds389.tlsPort }}
+      protocol: TCP
+      targetPort: {{ .Values.ds389.tlsPort }}
+      nodePort: {{ .Values.ds389.nodePortTls }}
+  selector:
+    app: {{ .Values.ds389.name }}
+  type: NodePort
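+# NOTE: nodePort / nodePortTls must fall within the cluster's NodePort range
+# (30000-32767 by default).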
diff --git a/charts/ds389/templates/service-internal.yaml b/charts/ds389/templates/service-internal.yaml
new file mode 100644
index 0000000..73176ba
--- /dev/null
+++ b/charts/ds389/templates/service-internal.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: {{ .Values.ds389.name }}
+  name: {{ .Values.ds389.name }}-internal-svc
+#  namespace: {{ .Values.ds389.nsName }}
+spec:
+  clusterIP: None
+  ports:
+    - name: {{ .Values.ds389.name }}-port
+      port: {{ .Values.ds389.internalPort }}
+      protocol: TCP
+      targetPort: {{ .Values.ds389.internalPort }}
+    - name: {{ .Values.ds389.name }}-tls-port
+      port: {{ .Values.ds389.tlsPort }}
+      protocol: TCP
+      targetPort: {{ .Values.ds389.tlsPort }}
+  selector:
+    app: {{ .Values.ds389.name }}
+  type: ClusterIP
diff --git a/charts/ds389/templates/serviceaccount.yaml b/charts/ds389/templates/serviceaccount.yaml
new file mode 100644
index 0000000..516cf7a
--- /dev/null
+++ b/charts/ds389/templates/serviceaccount.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.ds389.name }}-sa
+#  namespace: {{ .Values.ds389.nsName }}
diff --git a/charts/ds389/templates/statefulset.yaml b/charts/ds389/templates/statefulset.yaml
new file mode 100644
index 0000000..e546584
--- /dev/null
+++ b/charts/ds389/templates/statefulset.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ .Values.ds389.name }}
+#  namespace: {{ .Values.ds389.nsName }}
+spec:
+  serviceName: {{ .Values.ds389.name }}-internal-svc
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ .Values.ds389.name }}
+  template:
+    metadata:
+      labels:
+        app: {{ .Values.ds389.name }}
+    spec:
+      serviceAccountName: {{ .Values.ds389.name }}-sa
+      securityContext:
+        fsGroup: 499
+      initContainers:
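+        # give the persistent /data volume to the dirsrv UID/GID (499) so the
+        # unprivileged server process can write to it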
+        - name: {{ .Values.ds389.name }}-init-container
+          image: busybox
+          command: ["/bin/sh", "-c", "chown -R 499:499 /data"]
+          volumeMounts:
+            - name: {{ .Values.ds389.name }}-data
+              mountPath: /data
+      containers:
+        - name: dirsrv-container
+          image: {{ .Values.ds389.image }}
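+          # postStart bootstraps the directory once the server is up (the sleep assumes it is
+          # ready within ~60s): create the userroot backend, switch password hashing to
+          # CRYPT-SHA512, enable the referential-integrity and memberof plugins, disable
+          # anonymous binds, and seed demo users and a developers group via dsconf/dsidm.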
+          lifecycle:
+            postStart:
+              exec:
+                command: ["/bin/sh", "-c", "sleep 60;
+                         dsconf localhost backend create --suffix {{ .Values.ds389.rootDN }} --be-name userroot --create-suffix --create-entries ;
+                         dsconf localhost pwpolicy set --pwdscheme=CRYPT-SHA512 ;
+                         dsconf localhost config replace nsslapd-rootpwstoragescheme=CRYPT-SHA512 ;
+                         dsconf localhost config replace nsslapd-rootpw={{ .Values.ds389.dmPassword | b64dec }} ;
+                         dsconf localhost plugin referential-integrity enable ;
+                         dsconf localhost plugin memberof enable ;
+                         dsconf localhost config replace nsslapd-allow-anonymous-access=off ;
+                         dsidm localhost --basedn {{ .Values.ds389.rootDN }} user create --uid ldap_user --cn ldap_user --displayName ldap_user --uidNumber 1001 --gidNumber 1001 --homeDirectory /home/ldap_user ;
+                         dsidm localhost -b {{ .Values.ds389.rootDN }} account change_password uid=ldap_user,ou=people,{{ .Values.ds389.rootDN }} {{ .Values.ds389.userPassword }} ;
+                         dsidm localhost --basedn {{ .Values.ds389.rootDN }} user create --uid developer --cn developer --displayName developer --uidNumber 1002 --gidNumber 1002 --homeDirectory /home/developer ;
+                         dsidm localhost -b {{ .Values.ds389.rootDN }} account change_password uid=developer,ou=people,{{ .Values.ds389.rootDN }} {{ .Values.ds389.userPassword }} ;
+                         dsidm localhost --basedn {{ .Values.ds389.rootDN }} group create --cn developers;
+                         dsidm localhost -b {{ .Values.ds389.rootDN }} group add_member developers uid=developer,ou=people,{{ .Values.ds389.rootDN }}
+                         "]
+          env:
+            - name: DS_DM_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: dirsrv-dm-password
+                  key: dm-password
+            - name: DS_SUFFIX_NAME
+              value: "{{ .Values.ds389.rootDN }}"
+            - name: DS_ERRORLOG_LEVEL
+              value: "8192"
+            - name: DS_MEMORY_PERCENTAGE
+              value: "10"
+            - name: DS_REINDEX
+              value: "True"
+            - name: DS_STARTUP_TIMEOUT
+              value: "120"
+          ports:
+            - containerPort: {{ .Values.ds389.internalPort }}
+              protocol: TCP
+            - containerPort: {{ .Values.ds389.tlsPort }}
+              protocol: TCP
+          securityContext:
+            runAsUser: 499
+          volumeMounts:
+            - name: {{ .Values.ds389.name }}-data
+              mountPath: "/data"
+            - name: dirsrv-tls
+              mountPath: '/data/tls/'
+              readOnly: true
+      volumes:
+        - name: dirsrv-tls
+          secret:
+            secretName: dirsrv-tls-secret
+            items:
+              - key: tls.key
+                path: server.key
+              - key: tls.crt
+                path: server.crt
+  volumeClaimTemplates:
+    - metadata:
+        name: {{ .Values.ds389.name }}-data
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: {{ .Values.ds389.vcSize }}
diff --git a/charts/ds389/values.yaml b/charts/ds389/values.yaml
new file mode 100644
index 0000000..bed3109
--- /dev/null
+++ b/charts/ds389/values.yaml
@@ -0,0 +1,15 @@
+# Default values for ds389-helm-chart
+ds389:
+  nsName: "ds389"
+  name: "ds389"
+  image: "docker.io/389ds/dirsrv"
+  tlsKey: "LS0tLS1CRUdJTiBFTkNSWVBURUQgUFJJVkFURSBLRVktLS0tLQpNSUlKbnpCSkJna3Foa2lHOXcwQkJRMHdQREFiQmdrcWhraUc5dzBCQlF3d0RnUUlMZmtpMDkwcnZsb0NBZ2dBCk1CMEdDV0NHU0FGbEF3UUJLZy4uLkdOWWM3aTlTVkRCb0E9PQotLS0tLUVORCBFTkNSWVBURUQgUFJJVkFURSBLRVktLS0tLQ=="
+  tlsCert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZ4akNDQTY0Q0NRQ05UK2VQMnZqSnh6QU5CZ2txaGtpRzl3MEJBUXNGQURDQnBERUxNQWtHQTFVRUJoTUMKUmxJeEVqQVFCZ05WQkFnTUMuLi51ZEp3RTdIbm5BN2xwQQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t"
+  dmPassword: "YWRtaW4xMjM="
+  rootDN: "dc=mydemo,dc=lab"
+  userPassword: "supersecret123"
+  vcSize: "5Gi"
+  internalPort: 3389
+  tlsPort: 3636
+  nodePort: 30389
+  nodePortTls: 30636
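+# Any of the values above can be overridden at install time, e.g. (my-values.yaml is a
+# hypothetical overrides file):
+#   helm upgrade --install ds389 suse-lab-setup/ds389 --namespace ds389 --create-namespace -f my-values.yaml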
diff --git a/scripts/README.md b/scripts/README.md
index 4334e49..f27ab74 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -2,24 +2,45 @@
 
 ## Bash functions
 
+### Instruqt
+
+Name                        | Source
+----------------------------|-------------------------------------
+`instruqt_wait_hoststartup` | [instruqt/host.sh](instruqt/host.sh)
+
+### K3s
+
+Name                  | Source
+----------------------|-----------------------------------------------------
+`k3s_copy_kubeconfig` | [k3s/cluster_lifecycle.sh](k3s/cluster_lifecycle.sh)
+`k3s_create_cluster`  | [k3s/cluster_lifecycle.sh](k3s/cluster_lifecycle.sh)
+
+### Kubernetes
+
+Name                                  | Source
+--------------------------------------|-----------------------------------------------------------------------------
+`k8s_create_letsencryptclusterissuer` | [kubernetes/certificate_management.sh](kubernetes/certificate_management.sh)
+`k8s_install_certmanager`             | [kubernetes/certificate_management.sh](kubernetes/certificate_management.sh)
+`k8s_wait_fornodesandpods`            | [kubernetes/cluster_status.sh](kubernetes/cluster_status.sh)
+
+### Keycloak
+
+Name                   | Source
+-----------------------|---------------------------------------------------------
+`keycloak_login`       | [authentication/keycloak.sh](authentication/keycloak.sh)
+`keycloak_create_user` | [authentication/keycloak.sh](authentication/keycloak.sh)
+`keycloak_delete_user` | [authentication/keycloak.sh](authentication/keycloak.sh)
+
+### Linux
+
+Name                             | Source
+---------------------------------|-------------------------------
+`linux_create_fileAndLoopDevice` | [linux/disk.sh](linux/disk.sh)
+
+### Rancher
+
 Name                                           | Source
------------------------------------------------|-----------------------------------------------------------------------------
-`instruqt_wait_hoststartup`                    | [instruqt/host.sh](instruqt/host.sh)
-`k3s_copy_kubeconfig`                          | [k3s/cluster_lifecycle.sh](k3s/cluster_lifecycle.sh)
-`k3s_create_cluster`                           | [k3s/cluster_lifecycle.sh](k3s/cluster_lifecycle.sh)
-`k8s_create_letsencryptclusterissuer`          | [kubernetes/certificate_management.sh](kubernetes/certificate_management.sh)
-`k8s_install_certmanager`                      | [kubernetes/certificate_management.sh](kubernetes/certificate_management.sh)
-`k8s_wait_fornodesandpods`                     | [kubernetes/cluster_status.sh](kubernetes/cluster_status.sh)
-`keycloak_login`                               | [authentication/keycloak.sh](authentication/keycloak.sh)
-`keycloak_create_user`                         | [authentication/keycloak.sh](authentication/keycloak.sh)
-`keycloak_delete_user`                         | [authentication/keycloak.sh](authentication/keycloak.sh)
-`observability_check_stackpack`                | [observability/stackpack.sh](observability/stackpack.sh)
-`observability_create_ingestion_api_key`       | [observability/api_key.sh](observability/api_key.sh)
-`observability_delete_ingestion_api_key`       | [observability/api_key.sh](observability/api_key.sh)
-`observability_delete_stackpack`               | [observability/stackpack.sh](observability/stackpack.sh)
-`observability_get_component_snapshot`         | [observability/stql.sh](observability/stql.sh)
-`observability_get_component_state`            | [observability/stql.sh](observability/stql.sh)
-`observability_install_cli`                    | [observability/cli.sh](observability/cli.sh)
+-----------------------------------------------|-------------------------------------------------------------
 `rancher_create_apikey`                        | [rancher/user_actions.sh](rancher/user_actions.sh)
 `rancher_create_customcluster`                 | [rancher/cluster_actions.sh](rancher/cluster_actions.sh)
 `rancher_first_login`                          | [rancher/manager_lifecycle.sh](rancher/manager_lifecycle.sh)
@@ -31,11 +52,29 @@ Name                                           | Source
 `rancher_update_password`                      | [rancher/user_actions.sh](rancher/user_actions.sh)
 `rancher_update_serverurl`                     | [rancher/manager_settings.sh](rancher/manager_settings.sh)
 `rancher_wait_capiready`                       | [rancher/manager_lifecycle.sh](rancher/manager_lifecycle.sh)
-`suselinux_install_git`                        | [suselinux/packages.sh](suselinux/packages.sh)
-`suselinux_install_helm`                       | [suselinux/packages.sh](suselinux/packages.sh)
-`suselinux_install_kubectl`                    | [suselinux/packages.sh](suselinux/packages.sh)
-`suselinux_install_podman`                     | [suselinux/packages.sh](suselinux/packages.sh)
-`suselinux_register_cloudguest`                | [suselinux/registration.sh](suselinux/registration.sh)
+
+### SUSE Observability
+
+Name                                     | Source
+-----------------------------------------|---------------------------------------------------------
+`observability_check_stackpack`          | [observability/stackpack.sh](observability/stackpack.sh)
+`observability_create_ingestion_api_key` | [observability/api_key.sh](observability/api_key.sh)
+`observability_delete_ingestion_api_key` | [observability/api_key.sh](observability/api_key.sh)
+`observability_delete_stackpack`         | [observability/stackpack.sh](observability/stackpack.sh)
+`observability_get_component_snapshot`   | [observability/stql.sh](observability/stql.sh)
+`observability_get_component_state`      | [observability/stql.sh](observability/stql.sh)
+`observability_install_cli`              | [observability/cli.sh](observability/cli.sh)
+
+### SUSE Linux (previously SLES, SLE Micro)
+
+Name                            | Source
+--------------------------------|-------------------------------------------------------
+`suselinux_install_git`         | [suselinux/packages.sh](suselinux/packages.sh)
+`suselinux_install_helm`        | [suselinux/packages.sh](suselinux/packages.sh)
+`suselinux_install_kubectl`     | [suselinux/packages.sh](suselinux/packages.sh)
+`suselinux_install_openiscsi`   | [suselinux/packages.sh](suselinux/packages.sh)
+`suselinux_install_podman`      | [suselinux/packages.sh](suselinux/packages.sh)
+`suselinux_register_cloudguest` | [suselinux/registration.sh](suselinux/registration.sh)
 
 ## Concrete examples
 
diff --git a/scripts/authentication/ds389.sh b/scripts/authentication/ds389.sh
new file mode 100644
index 0000000..855ee49
--- /dev/null
+++ b/scripts/authentication/ds389.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# This library contains functions to set up and use the 389
+# Directory Server ( https://www.port389.org/index.html ),
+# an "enterprise-class Open Source LDAP server for Linux".
+# SPDX-License-Identifier: GPL-3.0-only OR GPL-3.0-or-later
+#
+# Copyright (C) 2024 Raul Mahiques
+#
+# This program is free software: you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation, either version 3 of the License, or
+#  (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# For more details find a copy of the license here: 
+# https://www.gnu.org/licenses/gpl-3.0.txt
+#
+
+
+
+#######################################
+# DS389 - restrict permissions:
+#   - prevent normal users from reading the whole directory
+# Arguments:
+#   1 - _ldap_uri
+#   2 - _ldap_basedn
+#   3 - _admin_user
+#   4 - _admin_pwd
+# Examples:
+#   ds389_restrict_permissions "<_ldap_uri>" "<_ldap_basedn>" "<_admin_user>" "<_admin_pwd>"
+#######################################
+function ds389_restrict_permissions() {
+  local _ldap_uri="$1"
+  local _ldap_basedn="$2"
+  local _admin_user="$3"
+  local _admin_pwd="$4"
+  ldapmodify -D "${_admin_user}" -w "${_admin_pwd}" -x  -H "${_ldap_uri}" << EOL
+dn: ou=people,${_ldap_basedn}
+changetype: modify
+delete: aci
+aci: (targetattr="objectClass || description || nsUniqueId || uid || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(targetfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user read"; allow (read, search, compare)(userdn="ldap:///anyone");)
+
+dn: ou=people,${_ldap_basedn}
+changetype: modify
+add: aci
+aci: (targetattr="objectClass || description || nsUniqueId || uid || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(targetfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable self user read"; allow (read, search, compare)(userdn="ldap:///self");)
+EOL
+}
+
+#######################################
+# DS389 - Grant user privileges to read the whole directory
+# Arguments:
+#   1 - _ldap_uri
+#   2 - _ldap_basedn
+#   3 - _admin_user
+#   4 - _admin_pwd
+#   5 - Username (Default: ldap_user)
+# Examples:
+#   ds389_ldap_user_user_private_read "ldap://ldap.mydemo.lab:389" "dc=mydemo,dc=lab" "cn=Directory Manager" "secret" "ldap_user"
+#######################################
+function ds389_ldap_user_user_private_read() {
+  local _ldap_uri="$1"
+  local _ldap_basedn="$2"
+  local _admin_user="$3"
+  local _admin_pwd="$4"
+  local ldap_user="${5:-ldap_user}"
+  ldapmodify -D "${_admin_user}" -w "${_admin_pwd}" -x  -H "${_ldap_uri}" << EOL
+dn: cn=user_private_read,ou=permissions,${_ldap_basedn}
+changetype: modify
+add: member
+member: uid=${ldap_user},ou=people,${_ldap_basedn}
+EOL
+}
+
+#######################################
+# DS389 - Verify user has access
+# Arguments:
+#   1 - ldap user DN
+#   2 - ldap user pwd
+#   3 - _ldap_uri
+#   4 - _ldap_basedn
+# Examples:
+#   ds389_ldap_user_access_check "uid=ldap_user,ou=people,dc=mydemo,dc=lab" "mypassword" "ldap://ldap.mydemo.lab:389" "dc=mydemo,dc=lab"
+#######################################
+function ds389_ldap_user_access_check() {
+  local _ldap_user_dn="${1}"
+  local _ldap_user_pwd="${2}"
+  local _ldap_uri="${3}"
+  local _ldap_basedn="${4}"
+  ldapsearch -x  -D "${_ldap_user_dn}" -w "${_ldap_user_pwd}" -H "${_ldap_uri}"  -b "${_ldap_basedn}"
+}
+
+#######################################
+# DS389 - Install 389 Directory server
+# Arguments:
+#   1 - _ldap_uri
+#   2 - _ldap_basedn
+#   3 - _admin_user
+#   4 - _admin_pwd
+# Examples:
+#   ds389_install "ldap://ldap.mydemo.lab:389" "dc=mydemo,dc=lab" "cn=Directory Manager" "secret"
+#######################################
+function ds389_install() {
+  local _ldap_uri="${1}"
+  local _ldap_basedn="${2}"
+  local _admin_user="${3}"
+  local _admin_pwd="${4}"
+  # add the repo
+  helm repo add suse-lab-setup https://opensource.suse.com/lab-setup
+  helm repo update
+  # installs the chart with default parameters
+  if [[ -f values.yaml ]]
+  then
+    helm upgrade --install ds389 --namespace ds389 --create-namespace suse-lab-setup/ds389 -f values.yaml
+  else
+    helm upgrade --install ds389 --namespace ds389 --create-namespace suse-lab-setup/ds389
+  fi
+  sleep 60
+  ds389_restrict_permissions "${_ldap_uri}" "${_ldap_basedn}" "${_admin_user}" "${_admin_pwd}"
+  ds389_ldap_user_user_private_read "${_ldap_uri}" "${_ldap_basedn}" "${_admin_user}" "${_admin_pwd}" "ldap_user"
+}
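+
+# Typical flow against the chart defaults (passwords decoded from charts/ds389/values.yaml;
+# <node-ip> is a placeholder for a cluster node reachable on NodePort 30389):
+#   ds389_install "ldap://<node-ip>:30389" "dc=mydemo,dc=lab" "cn=Directory Manager" "admin123"
+#   ds389_ldap_user_access_check "uid=ldap_user,ou=people,dc=mydemo,dc=lab" "supersecret123" "ldap://<node-ip>:30389" "dc=mydemo,dc=lab"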
+
+#######################################
+# DS389 - Uninstall 389 Directory server
+# Examples:
+#   ds389_uninstall
+#######################################
+function ds389_uninstall() {
+  helm uninstall ds389 --namespace ds389
+  sleep 15
+}
diff --git a/scripts/k3s/cluster_lifecycle.sh b/scripts/k3s/cluster_lifecycle.sh
index 04791a5..fb10dd5 100644
--- a/scripts/k3s/cluster_lifecycle.sh
+++ b/scripts/k3s/cluster_lifecycle.sh
@@ -11,7 +11,7 @@
 k3s_create_cluster() {
   local version=$1
 
-  echo "Create management cluster (K3s)..."
+  echo 'Create management cluster (K3s)...'
   curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL="${version}" K3S_KUBECONFIG_MODE="644" sh -
 }
 
diff --git a/scripts/kubernetes/certificate_management.sh b/scripts/kubernetes/certificate_management.sh
index 05325a2..ec8bb86 100644
--- a/scripts/kubernetes/certificate_management.sh
+++ b/scripts/kubernetes/certificate_management.sh
@@ -11,7 +11,7 @@
 k8s_install_certmanager() {
   local version=$1
 
-  echo "Installing cert-manager..."
+  echo 'Installing cert-manager...'
   helm repo add jetstack https://charts.jetstack.io
   helm repo update
   kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${version}/cert-manager.crds.yaml
diff --git a/scripts/kubernetes/cluster_status.sh b/scripts/kubernetes/cluster_status.sh
index 634c6de..1fa95ee 100644
--- a/scripts/kubernetes/cluster_status.sh
+++ b/scripts/kubernetes/cluster_status.sh
@@ -11,13 +11,13 @@
 k8s_wait_fornodesandpods() {
   # checks nodes are ready
   while ! kubectl get nodes --no-headers 2>/dev/null | grep -q .; do
-    echo "Waiting for nodes to be available..."
+    echo 'Waiting for nodes to be available...'
     sleep 5
   done
   while true; do
     NOT_READY_NODES=$(kubectl get nodes --no-headers 2>/dev/null | grep -v " Ready" | wc -l)
     if [ "$NOT_READY_NODES" -eq 0 ]; then
-      echo "All nodes are ready."
+      echo 'All nodes are ready.'
       break
     else
       sleep 5
@@ -26,13 +26,13 @@ k8s_wait_fornodesandpods() {
 
   # checks pods are completed or running
   while ! kubectl get pods --all-namespaces --no-headers 2>/dev/null | grep -q .; do
-    echo "Waiting for pods to be available..."
+    echo 'Waiting for pods to be available...'
     sleep 5
   done
   while true; do
     NOT_READY_PODS=$(kubectl get pods --all-namespaces --field-selector=status.phase!=Running,status.phase!=Succeeded --no-headers 2>/dev/null | wc -l)
     if [ "$NOT_READY_PODS" -eq 0 ]; then
-      echo "All pods are in Running or Completed status."
+      echo 'All pods are in Running or Completed status.'
       break
     else
       sleep 5
diff --git a/scripts/linux/disk.sh b/scripts/linux/disk.sh
new file mode 100644
index 0000000..b53eb10
--- /dev/null
+++ b/scripts/linux/disk.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Collection of functions to work with disks on Linux
+
+#######################################
+# Create a file and binds it to a loop device
+# Arguments:
+#   File name
+#   File size in MiB (number of 1 MiB blocks written by dd)
+#   Loop device
+# Examples:
+#   linux_create_fileAndLoopDevice '/loop-file1' '10240' '/dev/loop1'
+#######################################
+linux_create_fileAndLoopDevice() {
+  local fileName=$1
+  local sizeFile=$2
+  local loopDevice=$3
+
+  echo "Creating local file ${fileName} and loop device ${loopDevice}..."
+
+  # prepares the file for use with loopback device (creates a file filled with zero bytes)
+  dd if=/dev/zero of=${fileName} bs=1M count=${sizeFile} status=progress
+
+  # binds the file to the loop device (enabling to work with the file as if it were a block device, like a physical disk)
+  losetup ${loopDevice} ${fileName}
+}
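+
+# To undo the example above (assuming the loop device is no longer in use):
+#   losetup -d /dev/loop1 && rm -f /loop-file1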
diff --git a/scripts/observability/agent.sh b/scripts/observability/agent.sh
new file mode 100644
index 0000000..b6a4c1c
--- /dev/null
+++ b/scripts/observability/agent.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#######################################
+# Install the Observability agent in the cluster
+# Arguments:
+#   url (SUSE Observability)
+#   cluster_name
+#   ingestion_api_key
+# Examples:
+#   observability_agent_install https://obs.suse.com demo xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+#######################################
+observability_agent_install() {
+    local url=$1
+    local cluster_name=$2
+    local ingestion_api_key=$3
+    echo "Installing Observability agent..."
+    helm repo add suse-observability https://charts.rancher.com/server-charts/prime/suse-observability
+    helm repo update
+
+    helm upgrade --install suse-observability-agent suse-observability/suse-observability-agent \
+        --namespace suse-observability --create-namespace \
+        --set stackstate.apiKey=${ingestion_api_key} \
+        --set stackstate.url="${url%/}/receiver/stsAgent" \
+        --set stackstate.cluster.name=${cluster_name}
+
+    kubectl wait pods -n suse-observability -l app.kubernetes.io/instance=suse-observability-agent --for condition=Ready 2>/dev/null
+}
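+
+# To remove the agent later:
+#   helm uninstall suse-observability-agent --namespace suse-observability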
diff --git a/scripts/observability/stackpack.sh b/scripts/observability/stackpack.sh
index 24816b1..9d72262 100644
--- a/scripts/observability/stackpack.sh
+++ b/scripts/observability/stackpack.sh
@@ -47,3 +47,48 @@ observability_check_stackpack() {
   [[ -n "$stackpack_id" ]]
   return
 }
+
+#######################################
+# Install a StackPack instance in SUSE Observability
+# Arguments:
+#   url (SUSE Observability)
+#   service_token (SUSE Observability)
+#   cluster_name
+# Examples:
+#   observability_install_stackpack https://obs.suse.com/ xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx demo
+#######################################
+observability_install_stackpack() {
+  local url=$1
+  local service_token=$2
+  local cluster_name=$3
+
+  local stackpacks
+  stackpacks=$(/usr/local/bin/sts stackpack list-instances --name kubernetes-v2 -o json --url $url --service-token $service_token)
+  if [[ $(echo $stackpacks | jq -r '.instances[] | select(.config.kubernetes_cluster_name == "'$cluster_name'") | .id') ]]; then
+    echo ">>> StackPack for cluster '${cluster_name}' already exists"
+  else
+    /usr/local/bin/sts stackpack install --name kubernetes-v2 --url $url --service-token $service_token -p "kubernetes_cluster_name=$cluster_name" --unlocked-strategy fail
+    echo ">>> StackPack for cluster '${cluster_name}' installed"
+  fi
+}
+
+#######################################
+# Get the status of a StackPack instance in SUSE Observability
+# Arguments:
+#   url (SUSE Observability)
+#   service_token (SUSE Observability)
+#   cluster_name
+# Output:
+#   The status of the StackPack instance
+# Examples:
+#   observability_stackpack_status https://obs.suse.com/ xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx demo
+#######################################
+observability_stackpack_status() {
+  local url=$1
+  local service_token=$2
+  local cluster_name=$3
+
+  local stackpacks
+  stackpacks=$(/usr/local/bin/sts stackpack list-instances --name kubernetes-v2 -o json --url $url --service-token $service_token)
+  echo $stackpacks | jq -r '.instances[] | select(.config.kubernetes_cluster_name == "'$cluster_name'") | .status'
+}
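+
+# Example: poll until the instance is installed (status string as returned by the
+# SUSE Observability API; "INSTALLED" is assumed here):
+#   until [[ "$(observability_stackpack_status "$url" "$service_token" "$cluster_name")" == "INSTALLED" ]]; do sleep 10; done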
diff --git a/scripts/rancher/cluster_actions.sh b/scripts/rancher/cluster_actions.sh
index 21f073c..fd4374b 100644
--- a/scripts/rancher/cluster_actions.sh
+++ b/scripts/rancher/cluster_actions.sh
@@ -7,27 +7,27 @@
 #   rancher_list_clusters
 #######################################
 rancher_list_clusters() {
-  echo "Listing clusters registered in Rancher..."
+  echo 'Listing clusters registered in Rancher...'
   kubectl get clusters.provisioning.cattle.io --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'
 }
 
 #######################################
-# Create downstream custom cluster in Rancher
+# Create downstream custom cluster in Rancher (does not wait or retrieve the cluster ID)
 # Globals:
 #   CLUSTER_ID
 # Arguments:
 #   name
 #   version (Kubernetes)
 # Examples:
-#   rancher_create_customcluster demo 'v1.27.16+rke2r1'
+#   rancher_create_customcluster_nowait demo 'v1.27.16+rke2r1'
 #######################################
-rancher_create_customcluster() {
+rancher_create_customcluster_nowait() {
   local name=$1
   local version=$2
 
   rancher_wait_capiready
 
-  echo "Creating downstream cluster in Rancher..."
+  echo 'Creating downstream cluster in Rancher...'
   cat <<EOF | kubectl apply -f -
 apiVersion: provisioning.cattle.io/v1
 kind: Cluster
@@ -81,12 +81,42 @@ spec:
         skipWaitForDeleteTimeoutSeconds: 0
         timeout: 120
 EOF
+}
+
+#######################################
+# Create downstream custom cluster in Rancher
+# Globals:
+#   CLUSTER_ID
+# Arguments:
+#   name
+#   version (Kubernetes)
+# Examples:
+#   rancher_create_customcluster demo 'v1.27.16+rke2r1'
+#######################################
+rancher_create_customcluster() {
+  local name=$1
+  local version=$2
+
+  rancher_create_customcluster_nowait $name $version
 
   sleep 10
 
   rancher_get_clusterid $name
 }
 
+#######################################
+# Return cluster ID from its name
+# Arguments:
+#   name
+# Examples:
+#   CLUSTER_ID=$(rancher_return_clusterid demo)
+#######################################
+rancher_return_clusterid() {
+  local name=$1
+
+  kubectl get cluster.provisioning.cattle.io -n fleet-default -o=jsonpath="{range .items[?(@.metadata.name==\"${name}\")]}{.status.clusterName}{end}"
+}
+
 #######################################
 # Get cluster ID from its name
 # Globals:
@@ -99,10 +129,23 @@ EOF
 rancher_get_clusterid() {
   local name=$1
 
-  CLUSTER_ID=$(kubectl get cluster.provisioning.cattle.io -n fleet-default -o=jsonpath="{range .items[?(@.metadata.name==\"${name}\")]}{.status.clusterName}{end}")
+  CLUSTER_ID=$(rancher_return_clusterid $name)
   echo "DEBUG CLUSTER_ID=${CLUSTER_ID}"
 }
 
+#######################################
+# Return cluster registration command line from Rancher
+# Arguments:
+#   cluster ID
+# Examples:
+#   CLUSTER_REGISTRATION_COMMAND=$(rancher_return_clusterregistrationcommand 42)
+#######################################
+rancher_return_clusterregistrationcommand() {
+  local id=$1
+
+  kubectl get clusterregistrationtoken.management.cattle.io -n $id -o=jsonpath='{.items[*].status.nodeCommand}'
+}
+
 #######################################
 # Get cluster registration command line from Rancher
 # Globals:
@@ -115,6 +158,6 @@ rancher_get_clusterid() {
 rancher_get_clusterregistrationcommand() {
   local id=$1
 
-  REGISTRATION_COMMAND=$(kubectl get clusterregistrationtoken.management.cattle.io -n $id -o=jsonpath='{.items[*].status.nodeCommand}')
+  REGISTRATION_COMMAND=$(rancher_return_clusterregistrationcommand $id)
   echo "DEBUG REGISTRATION_COMMAND=${REGISTRATION_COMMAND}"
 }
diff --git a/scripts/rancher/manager_lifecycle.sh b/scripts/rancher/manager_lifecycle.sh
index b687b53..180ee48 100644
--- a/scripts/rancher/manager_lifecycle.sh
+++ b/scripts/rancher/manager_lifecycle.sh
@@ -19,7 +19,7 @@ rancher_install_withcertmanagerclusterissuer() {
   local hostname=$4
   local clusterissuer=$5
 
-  echo "Installing Rancher..."
+  echo 'Installing Rancher...'
   helm repo add rancher-${repository} https://releases.rancher.com/server-charts/${repository}
   helm repo update
   helm upgrade --install rancher rancher-${repository}/rancher --namespace cattle-system --create-namespace \
@@ -31,7 +31,7 @@ rancher_install_withcertmanagerclusterissuer() {
     --set ingress.tls.secretName=rancher-tls \
     --set agentTLSMode="system-store"
   kubectl wait pods -n cattle-system -l app=rancher --for condition=Ready --timeout=180s
-  echo "Waiting for Rancher web app to be running with a valid certificate..."
+  echo 'Waiting for Rancher web app to be running with a valid certificate...'
   while ! kubectl get secret rancher-tls --namespace cattle-system 2>/dev/null; do sleep 1; done
   sleep 10
 }
@@ -48,7 +48,7 @@ rancher_first_login() {
   local rancherUrl=$1
   local newPassword=$2
 
-  echo "Do first login on Rancher..."
+  echo 'Do first login on Rancher...'
   BOOTSTRAP_PASSWORD=$(kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ "\n" }}')
   echo "DEBUG BOOTSTRAP_PASSWORD=${BOOTSTRAP_PASSWORD}"
   rancher_login_withpassword $rancherUrl 'admin' $BOOTSTRAP_PASSWORD
@@ -67,14 +67,14 @@ rancher_first_login() {
 rancher_wait_capiready() {
   while true; do
     status=$(kubectl get deployment capi-controller-manager -n cattle-provisioning-capi-system -o jsonpath='{.status.conditions[?(@.type=="Available")].status}' 2>/dev/null)
-    if [ "$status" == "True" ]; then
-      echo "Deployment capi-controller-manager is available"
+    if [ "$status" == 'True' ]; then
+      echo 'Deployment capi-controller-manager is available'
       break
     fi
     sleep 10
   done
-  while [[ $(kubectl get endpoints capi-webhook-service -n cattle-provisioning-capi-system -o jsonpath='{.subsets}' 2>/dev/null) == "" ]]; do
+  while [[ $(kubectl get endpoints capi-webhook-service -n cattle-provisioning-capi-system -o jsonpath='{.subsets}' 2>/dev/null) == '' ]]; do
     sleep 10
   done
-  echo "Service capi-webhook-service is ready"
+  echo 'Service capi-webhook-service is ready'
 }
diff --git a/scripts/rancher/manager_settings.sh b/scripts/rancher/manager_settings.sh
index 29de830..37c23a1 100644
--- a/scripts/rancher/manager_settings.sh
+++ b/scripts/rancher/manager_settings.sh
@@ -11,6 +11,6 @@
 rancher_update_serverurl() {
   local rancherUrl=$1
 
-  echo "Sets Rancher URL in settings..."
+  echo 'Sets Rancher URL in settings...'
   kubectl patch settings.management.cattle.io server-url --type='merge' --patch '{ "value": "'$rancherUrl'" }'
 }
diff --git a/scripts/rancher/user_actions.sh b/scripts/rancher/user_actions.sh
index 3dded56..623c43e 100644
--- a/scripts/rancher/user_actions.sh
+++ b/scripts/rancher/user_actions.sh
@@ -44,7 +44,7 @@ rancher_update_password() {
   local currentPassword=$3
   local newPassword=$4
 
-  echo "Updates Rancher user password..."
+  echo 'Updates Rancher user password...'
   curl -s -k -H "Authorization: Bearer $token" \
     -H 'Content-Type: application/json' \
     -X POST \
@@ -71,7 +71,7 @@ rancher_create_apikey() {
   local token=$2
   local description=$3
 
-  echo "Creates a Rancher API Key..."
+  echo 'Creates a Rancher API Key...'
   API_KEY_RESPONSE=$(curl -s -k "$rancherUrl/v3/tokens" \
     -H 'Content-Type: application/json' \
     -H "Authorization: Bearer $token" \
diff --git a/scripts/suselinux/packages.sh b/scripts/suselinux/packages.sh
index 2f98fdb..fb46814 100644
--- a/scripts/suselinux/packages.sh
+++ b/scripts/suselinux/packages.sh
@@ -46,6 +46,19 @@ suselinux_install_git() {
   zypper install -y git
 }
 
+#######################################
+# Install open-iscsi on SUSE Linux
+# Examples:
+#   suselinux_install_openiscsi
+#######################################
+suselinux_install_openiscsi() {
+  zypper --gpg-auto-import-keys -q refresh
+  zypper --gpg-auto-import-keys -q install -y open-iscsi
+  systemctl -q enable iscsid
+  systemctl start iscsid
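+  # load the iSCSI-over-TCP transport module so the initiator can be used right away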
+  modprobe iscsi_tcp
+}
+
 #######################################
 # Install Podman on SUSE Linux
 # Examples: