Add tests for kine
Signed-off-by: Vitor Savian <[email protected]>
vitorsavian committed Oct 31, 2024
1 parent 3af06a5 commit 745becb
Showing 4 changed files with 418 additions and 1 deletion.
157 changes: 157 additions & 0 deletions tests/e2e/kine/Vagrantfile
@@ -0,0 +1,157 @@
ENV['VAGRANT_NO_PARALLEL'] = ENV['E2E_STANDUP_PARALLEL'] ? nil : 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "agent-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
HARDENED = (ENV['E2E_HARDENED'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "postgres")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i
CNI = (ENV['E2E_CNI'] || "canal") # canal, cilium and calico supported
REGISTRY = (ENV['E2E_REGISTRY'] || "")
# Virtualbox >= 6.1.28 requires /etc/vbox/networks.conf for expanded private networks,
# see https://www.virtualbox.org/manual/ch06.html#network_hostonly
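# (assumed example: a line "* 10.10.10.0/24" in that file permits this range)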
NETWORK_PREFIX = "10.10.10"

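# Provisions a single VM: assigns its static IP, loads the shared helpers,
# and wires up the rke2 provisioner according to the node's role
# (first server, additional server, or agent).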
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{role[0]}-#{role_num}"
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
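# defaultOSConfigure, getInstallType, dockerInstall and cisPrep used below
# are provided by vagrantdefaults.rb.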

defaultOSConfigure(vm)
db_type = getDBType(role, role_num, vm)

if !HARDENED.empty?
cisPrep(vm)
end
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

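# Sanity check: confirm DNS resolution and outbound connectivity before installing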
vm.provision "shell", inline: "ping -c 2 rke2.io"

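# The first server bootstraps the cluster; every other node joins it at
# https://10.10.10.100:9345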
if role.include?("server") && role_num == 0
vm.provision :rke2, run: 'once' do |rke2|
rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
rke2.config = <<~YAML
write-kubeconfig-mode: '0644'
node-external-ip: #{NETWORK_PREFIX}.100
node-ip: #{NETWORK_PREFIX}.100
token: vagrant-rke2
cni: #{CNI}
#{db_type}
YAML
end
elsif role.include?("server") && role_num != 0
vm.provision :rke2, run: 'once' do |rke2|
rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
rke2.config = <<~YAML
write-kubeconfig-mode: '0644'
node-external-ip: #{node_ip}
node-ip: #{node_ip}
server: https://#{NETWORK_PREFIX}.100:9345
token: vagrant-rke2
cni: #{CNI}
#{db_type}
YAML
end
end

if role.include?("agent")
vm.provision :rke2, run: 'once' do |rke2|
rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}]
rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
rke2.install_path = false
rke2.config = <<~YAML
write-kubeconfig-mode: '0644'
node-external-ip: #{node_ip}
node-ip: #{node_ip}
server: https://#{NETWORK_PREFIX}.100:9345
token: vagrant-rke2
YAML
end
end
end

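# Returns the datastore-related rke2 config snippet for server nodes
# (agents get none). For external databases, the first server also launches
# the database container, and every server points at it via 10.10.10.100.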
def getDBType(role, role_num, vm)
if EXTERNAL_DB == "mariadb"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "shell", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MARIADB_ROOT_PASSWORD=e2e mariadb:11"
vm.provision "shell", inline: "echo \"Wait for mariaDB to startup\"; sleep 10"
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
end

elsif EXTERNAL_DB == "mysql"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "shell", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MYSQL_ROOT_PASSWORD=e2e mysql:5.7"
vm.provision "shell", inline: "echo \"Wait for mysql to startup\"; sleep 10"
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
end

elsif EXTERNAL_DB == "postgres"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "shell", type: "shell", inline: "docker run -d -p 5432:5432 --name postgres -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
vm.provision "shell", inline: "echo \"Wait for postgres to startup\"; sleep 10"
return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/rke2?sslmode=disable'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/rke2?sslmode=disable'"
end

elsif EXTERNAL_DB == "sqlite"
if role.include?("server") && role_num == 0
return "--disable-etcd: true"
end
elsif EXTERNAL_DB == "none"
if role.include?("server") && role_num == 0
# Will use etcd
end
else
puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
abort
end
return ""
end


Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload"]
# Default provider is libvirt; virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end

if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end

NODE_ROLES.each_with_index do |name, i|
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
end
end
end
238 changes: 238 additions & 0 deletions tests/e2e/kine/kine_test.go
@@ -0,0 +1,238 @@
package kine

import (
"flag"
"fmt"
"os"
"strings"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/rke2/tests/e2e"
)

// Valid nodeOS: bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64
var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
var serverCount = flag.Int("serverCount", 2, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")

// Environment Variables Info:
// E2E_CNI=(canal|cilium|calico)
// E2E_RELEASE_VERSION=v1.23.1+rke2r1 or nil for latest commit from master
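// E2E_EXTERNAL_DB=(mariadb|mysql|postgres|sqlite|none), defaults to postgres (see Vagrantfile)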

func Test_E2EKineValidation(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Kine Test Suite", suiteConfig, reporterConfig)
}

var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Basic Cluster Creation with Kine", Ordered, func() {
It("Starts up kine with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})

It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})

It("Verifies ClusterIP Service", func() {
_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-clusterip"))

clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
for _, nodeName := range serverNodeNames {
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
}
})
It("Verifies NodePort Service", func() {
_, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, nodeName := range serverNodeNames {
nodeExternalIP, err := e2e.FetchNodeExternalIP(nodeName)
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
cmd = "curl -L --insecure http://" + nodeExternalIP + ":" + nodeport + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "5s", "1s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
}
})

It("Verifies LoadBalancer Service", func() {
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
ip, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())

cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))

cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
})

It("Verifies Ingress", func() {
_, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, nodeName := range serverNodeNames {
ip, _ := e2e.FetchNodeExternalIP(nodeName)
cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-ingress"))
}
})

It("Verifies Daemonset", func() {
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())

Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should(Equal(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())
})

It("Verifies dns access", func() {
_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "120s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
})

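// Writes a file into the PVC, deletes the pod, recreates it, and checks
// that the data survived the rescheduling.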
It("Verify Local Path Provisioner storage ", func() {
_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())

Eventually(func() (string, error) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "2s").Should(MatchRegexp(`local-path-pvc.+Bound`))

Eventually(func() (string, error) {
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "420s", "2s").Should(MatchRegexp(`volume-test.+Running`))

cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
_, err = e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())

cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
_, err = e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())

_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile)
Expect(err).NotTo(HaveOccurred())

Eventually(func() (string, error) {
cmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- cat /data/test"
return e2e.RunCommand(cmd)
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})

Context("Validate restart", func() {
It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should(Equal(count), "Daemonset pod count does not match node count")
podsRunningAfterRestart := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunningAfterRestart++
}
}
g.Expect(len(nodes)).Should(Equal(podsRunningAfterRestart), "Daemonset pods are not running after the restart")
}, "1120s", "5s").Should(Succeed())
})
})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})