Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cluster Validation using Canonical K8s #35

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .charmcraft_channel
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
2.x/stable
4 changes: 2 additions & 2 deletions .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on: [pull_request]
jobs:
call-inclusive-naming-check:
name: Inclusive naming
uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main
uses: canonical/inclusive-naming/.github/workflows/woke.yaml@main
with:
fail-on-error: "true"

Expand All @@ -13,7 +13,7 @@ jobs:
name: Lint Unit
uses: charmed-kubernetes/workflows/.github/workflows/lint-unit.yaml@main
with:
python: "['3.8', '3.9', '3.10', '3.11', '3.12']"
python: "['3.8', '3.10', '3.12']"
needs:
- call-inclusive-naming-check

Expand Down
31 changes: 3 additions & 28 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Running the end-to-end test suite on Canonical K8s requires integrating the

First, deploy k8s-operator and k8s-worker, and relate them.

```
```shell
juju deploy k8s
juju deploy k8s-worker
juju expose k8s
Expand All @@ -31,34 +31,9 @@ juju integrate k8s k8s-worker:cluster

Next, deploy the kubernetes-e2e charm:

```
```shell
juju deploy kubernetes-e2e
```

Export the kubeconfig file of your Canonical K8s cluster:

```
juju run k8s/0 get-kubeconfig | yq '.kubeconfig' -r > kubeconfig
```

Attach the exported kubeconfig as a resource for the kubernetes-e2e charm:

```
juju attach-resource kubernetes-e2e kubeconfig=./kubeconfig
```

When you attach the kubeconfig resource, the kubernetes-e2e charm places the kubeconfig at /home/ubuntu/.kube/config - this allows kubectl to communicate with your Canonical K8s cluster.

### Running the e2e test

Once the relations have settled, and the `kubernetes-e2e` charm reports
`Ready to test.` - you may kick off an end-to-end validation test.

Optionally, you can use the `extra` action parameter to specify a kubernetes
context to be used.

```
juju run kubernetes-e2e/0 test --wait 2h extra='-context k8s'
juju integrate kubernetes-e2e:kube-control k8s:kube-control
HomayoonAlimohammadi marked this conversation as resolved.
Show resolved Hide resolved
```

## Usage with Charmed Kubernetes
Expand Down
6 changes: 3 additions & 3 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
ops ~= 2.5
git+https://github.com/charmed-kubernetes/interface-kube-control.git@main#subdirectory=ops
git+https://github.com/charmed-kubernetes/interface-tls-certificates.git@main#subdirectory=ops
ops == 2.17.1
ops.interface-kube-control @ git+https://github.com/charmed-kubernetes/interface-kube-control.git@edc07bce7ea4c25d472fa4d95834602a7ebce5cd#subdirectory=ops
ops.interface-tls-certificates @ git+https://github.com/charmed-kubernetes/interface-tls-certificates.git@4a1081da098154b96337a09c8e9c40acff2d330e#subdirectory=ops
49 changes: 23 additions & 26 deletions src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,6 @@

import ops
from charms.operator_libs_linux.v2 import snap
from ops import (
ActionEvent,
BlockedStatus,
EventBase,
MaintenanceStatus,
WaitingStatus,
)
from ops.interface_kube_control import KubeControlRequirer
from ops.interface_tls_certificates import CertificatesRequires

Expand Down Expand Up @@ -74,7 +67,7 @@ class KubernetesE2ECharm(ops.CharmBase):

def __init__(self, *args) -> None:
super().__init__(*args)
self.kube_control = KubeControlRequirer(self)
self.kube_control = KubeControlRequirer(self, schemas="0,1")
self.certificates = CertificatesRequires(self)

self.CA_CERT_PATH.parent.mkdir(exist_ok=True)
Expand All @@ -92,23 +85,23 @@ def __init__(self, *args) -> None:
self.framework.observe(self.on.test_action, self._on_test_action)
self.framework.observe(self.on.config_changed, self._setup_environment)

def _kube_control_relation_joined(self, event: EventBase):
self.kube_control.set_auth_request(self.unit.name)
def _kube_control_relation_joined(self, event: ops.EventBase):
self.kube_control.set_auth_request(self.unit.name, "system:masters")
return self._setup_environment(event)

def _ensure_kube_control_relation(self, event: EventBase) -> bool:
self.unit.status = MaintenanceStatus("Evaluating kubernetes authentication.")
def _ensure_kube_control_relation(self, event: ops.EventBase) -> bool:
self.unit.status = ops.MaintenanceStatus("Evaluating kubernetes authentication.")
evaluation = self.kube_control.evaluate_relation(event)
if evaluation:
if "Waiting" in evaluation:
self.unit.status = WaitingStatus(evaluation)
self.unit.status = ops.WaitingStatus(evaluation)
else:
self.unit.status = BlockedStatus(evaluation)
self.unit.status = ops.BlockedStatus(evaluation)
return False
if not self.kube_control.get_auth_credentials(self.unit.name):
self.unit.status = WaitingStatus("Waiting for kube-control: unit credentials.")
self.unit.status = ops.WaitingStatus("Waiting for kube-control: unit credentials.")
return False
self.unit.status = MaintenanceStatus("Kubernetes authentication completed.")
self.unit.status = ops.MaintenanceStatus("Kubernetes authentication completed.")
self.kube_control.create_kubeconfig(
self.CA_CERT_PATH, "/root/.kube/config", "root", self.unit.name
)
Expand All @@ -117,19 +110,23 @@ def _ensure_kube_control_relation(self, event: EventBase) -> bool:
)
return True

def _ensure_certificates_relation(self, event: EventBase) -> bool:
self.unit.status = MaintenanceStatus("Evaluating certificates.")
def _ensure_certificates_relation(self, event: ops.EventBase) -> bool:
if self.kube_control.get_ca_certificate():
logger.info("CA Certificate is available from kube-control.")
return True
Comment on lines +114 to +116
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we get a CA certificate from the kube-control relation, we don’t need to write the CA cert from the TLS relation, right? However, I notice we are using CA_CERT_PATH to create the kubeconfig (L112,L115). Am I missing something here? Could you clarify if I’m misunderstanding this?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I hope I can. The CA cert CAN come from kube-control over the v2 schema. In v1 schema the CA cert won't be there. So, to maintain backwards API here the caller must provide a path where the CA certs will be if they aren't in the relation data.

Realistically:
kubernetes-control-plane doesn't provide the CA cert
k8s does provide the CA cert

when the ca cert is in the relation, this path CA_CERT_PATH is ignored.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

details from the library:

    def create_kubeconfig(
        self, ca: PathLike, kubeconfig: PathLike, user: str, k8s_user: str
    ):
        ... Skipped ...
        if ca_content := self.get_ca_certificate():
            ca_b64 = base64.b64encode(ca_content).decode("utf-8")
        elif Path(ca).exists():
            ca_b64 = base64.b64encode(Path(ca).read_bytes()).decode("utf-8")
        else:
            log.error("No CA certificate found")
            raise FileNotFoundError("No CA certificate found")


self.unit.status = ops.MaintenanceStatus("Evaluating certificates.")
evaluation = self.certificates.evaluate_relation(event)
if evaluation:
if "Waiting" in evaluation:
self.unit.status = WaitingStatus(evaluation)
self.unit.status = ops.WaitingStatus(evaluation)
else:
self.unit.status = BlockedStatus(evaluation)
self.unit.status = ops.BlockedStatus(evaluation)
return False
self.CA_CERT_PATH.write_text(self.certificates.ca)
return True

def _setup_environment(self, event: EventBase) -> None:
def _setup_environment(self, event: ops.EventBase) -> None:
kubeconfig_resource_manager = KubeConfigResourceManager(self.model)

if kubeconfig_resource_manager.is_valid_kubeconfig_resource():
Expand All @@ -141,7 +138,7 @@ def _setup_environment(self, event: EventBase) -> None:
if not self._ensure_kube_control_relation(event):
return

channel = self.config.get("channel")
channel = str(self.config.get("channel"))
self._install_snaps(channel)

self.unit.status = ops.ActiveStatus("Ready to test.")
Expand All @@ -152,14 +149,14 @@ def _install_snaps(self, channel: Optional[str]) -> None:
snap.ensure("kubernetes-test", snap.SnapState.Latest.value, channel=channel, classic=True)
self.unit.status = ops.MaintenanceStatus("Snaps installed successfully.")

def _check_kube_config_exists(self, event: ActionEvent) -> bool:
def _check_kube_config_exists(self, event: ops.ActionEvent) -> bool:
if not Path(KUBE_CONFIG_PATH).exists():
event.fail("Missing Kubernetes configuration. See logs for info.")
event.log("Relate to the certificate authority and kubernetes-control-plane.")
return False
return True

def _log_has_errors(self, event: ActionEvent) -> bool:
def _log_has_errors(self, event: ops.ActionEvent) -> bool:
log_file_path = Path(f"/home/ubuntu/{event.id}.log")

if not log_file_path.exists():
Expand All @@ -170,7 +167,7 @@ def _log_has_errors(self, event: ActionEvent) -> bool:

return "Test Suite Failed" in log_file_path.read_text()

def _on_test_action(self, event: ActionEvent) -> None:
def _on_test_action(self, event: ops.ActionEvent) -> None:
def param_get(p):
return str(event.params.get(p, ""))

Expand All @@ -186,7 +183,7 @@ def param_get(p):
logger.info("Running scripts/test.sh: %s", "".join(command))

previous_status = self.unit.status
self.unit.status = MaintenanceStatus("Tests running...")
self.unit.status = ops.MaintenanceStatus("Tests running...")

# Let check=False so the log and process return code can be checked below.
process = subprocess.run(command, capture_output=False, check=False)
Expand Down
Loading