Test ci #17

Open · wants to merge 2 commits into base: e2e_sp_0

163 changes: 137 additions & 26 deletions test/e2e/framework.go
@@ -24,11 +24,19 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"

"github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common"
"github.com/vmware-tanzu/nsx-operator/test/e2e/providers"
)

const (
defaultTimeout = 90 * time.Second
defaultTimeout = 100 * time.Second
)

type Status int

const (
Ready Status = iota
Deleted
)

type ClusterNode struct {
@@ -349,15 +357,22 @@ type PodCondition func(*corev1.Pod) (bool, error)

// waitForSecurityPolicyReadyOrDeleted polls the K8s apiServer until the specified SecurityPolicy reaches the
// requested state: Ready (its "Ready" condition is "True") or Deleted (no longer returned by the apiServer), or
// until the provided timeout expires.
func (data *TestData) waitForSecurityPolicyReady(timeout time.Duration, name, namespace string) error {
func (data *TestData) waitForSecurityPolicyReadyOrDeleted(timeout time.Duration, namespace string, name string, status Status) error {
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
cmd := fmt.Sprintf("kubectl get securitypolicy %s -n %s -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'", name, namespace)
log.Printf("%s", cmd)
rc, stdout, _, err := RunCommandOnNode(clusterInfo.masterNodeName, cmd)
if err != nil || rc != 0 {
if status == Deleted {
return true, nil
}
return false, fmt.Errorf("error when running the following command `%s` on master Node: %v, %s", cmd, err, stdout)
} else {
if stdout == "True" {
return true, nil
if status == Ready {
if stdout == "True" {
return true, nil
}
return false, nil
}
return false, nil
}
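
A minimal usage sketch for the combined helper (namespace and policy name are hypothetical; defaultTimeout, TestData, and the Status constants are the ones defined in this file): a test waits for the policy to be realized, deletes it, then waits for the apiServer to stop returning it.

	// Wait until the policy's Ready condition reports "True".
	if err := data.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, "test-ns", "isolate-policy-1", Ready); err != nil {
		t.Fatalf("SecurityPolicy never became Ready: %v", err)
	}
	// ... delete the policy, e.g. via deleteYAML ...
	// Then wait until the kubectl probe starts failing, i.e. the CR is gone.
	if err := data.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, "test-ns", "isolate-policy-1", Deleted); err != nil {
		t.Fatalf("SecurityPolicy was not cleaned up: %v", err)
	}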
@@ -387,15 +402,6 @@ func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string,
return data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

// podWaitForRunning polls the k8s apiServer until the specified Pod is in the "running" state (or
// until the provided timeout expires).
func (data *TestData) podWaitForRunning(timeout time.Duration, name, namespace string) error {
_, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) {
return pod.Status.Phase == corev1.PodRunning, nil
})
return err
}

// podWaitForIPs polls the K8s apiServer until the specified Pod is in the "running" state (or until
// the provided timeout expires). The function then returns the IP addresses assigned to the Pod. If the
// Pod is not using "hostNetwork", the function also checks that an IP address exists in each required
@@ -440,6 +446,39 @@ func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace strin
return ips, nil
}

// deploymentWaitForIPsOrNames polls the K8s apiServer until every Pod of the specified Deployment is Running
// with an IP address, then returns the Pod IPs and Pod names.
func (data *TestData) deploymentWaitForIPsOrNames(timeout time.Duration, namespace, deployment string) ([]string, []string, error) {
podIPStrings := sets.NewString()
var podNames []string
opt := metav1.ListOptions{
LabelSelector: "deployment=" + deployment,
}
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
if pods, err := data.clientset.CoreV1().Pods(namespace).List(context.TODO(), opt); err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, fmt.Errorf("error when getting Pod %v", err)
} else {
for _, p := range pods.Items {
if p.Status.Phase != corev1.PodRunning {
return false, nil
} else if p.Status.PodIP == "" {
return false, nil
} else {
podIPStrings.Insert(p.Status.PodIP)
podNames = append(podNames, p.Name)
}
}
return true, nil
}
})
if err != nil {
return nil, nil, err
}
return podIPStrings.List(), podNames, nil
}
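
Note that the helper selects Pods on a literal deployment=<name> label rather than the Deployment's own selector, so test manifests must set that label on the Pod template (the tcp-deployment manifest later in this PR does). A hedged usage sketch with that manifest's names:

	// Block until every Pod labeled deployment=tcp-deployment in namespace
	// "web" is Running with an IP, then use the returned IPs and names.
	ips, names, err := data.deploymentWaitForIPsOrNames(defaultTimeout, "web", "tcp-deployment")
	if err != nil {
		t.Fatalf("tcp-deployment Pods never got IPs: %v", err)
	}
	log.Printf("Pods %v have IPs %v", names, ips)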

func parsePodIPs(podIPStrings sets.String) (*PodIPs, error) {
ips := new(PodIPs)
for idx := range podIPStrings.List() {
@@ -496,15 +535,15 @@ func (data *TestData) runCommandFromPod(namespace string, podName string, contai
Stdout: &stdoutB,
Stderr: &stderrB,
}); err != nil {
log.Printf("Error running command: %v, stdout: %s, stderr: %s", err, stdoutB.String(), stderrB.String())
log.Printf("Error when running command '%s' in Pod '%s/%s' container '%s': %v", strings.Join(cmd, " "), namespace, podName, containerName, err)
return stdoutB.String(), stderrB.String(), err
}
log.Printf("Running command, stdout: %s, stderr: %s", stdoutB.String(), stderrB.String())
outStr, errStr := stdoutB.String(), stderrB.String()
log.Printf("Command '%s' in Pod '%s/%s' container '%s' returned with output: '%s' and error: '%s'", strings.Join(cmd, " "), namespace, podName, containerName, outStr, errStr)
return stdoutB.String(), stderrB.String(), nil
}

func (data *TestData) runPingCommandFromTestPod(namespace string, podName string, targetPodIPs *PodIPs, count int) error {

func (data *TestData) runPingCommandFromPod(namespace string, podName string, targetPodIPs *PodIPs, count int) error {
var cmd []string
if targetPodIPs.ipv4 != nil {
cmd = []string{"ping", "-c", strconv.Itoa(count), targetPodIPs.ipv4.String()}
@@ -521,33 +560,105 @@ func (data *TestData) runPingCommandFromTestPod(namespace string, podName string
return nil
}

func (data *TestData) runNetcatCommandFromTestPod(namespace string, podName string, server string, port int) error {
// Retrying several times to avoid flakes as the test may involve DNS (coredns) and Service/Endpoints (kube-proxy).
func (data *TestData) runNetcatCommandFromPod(namespace string, podName string, containerName string, server string, port int) error {
cmd := []string{
"/bin/sh",
"-c",
fmt.Sprintf("for i in $(seq 1 5); do nc -vz -w 4 %s %d && exit 0 || sleep 1; done; exit 1",
fmt.Sprintf("for i in $(seq 1 5); do nc -w 4 %s %d && exit 0 || sleep 1; done; exit 1",
server, port),
}
stdout, stderr, err := data.runCommandFromPod(namespace, podName, podName, cmd)
if err == nil {
return nil
_, _, err := data.runCommandFromPod(namespace, podName, containerName, cmd)
if err != nil {
return err
}
return fmt.Errorf("nc stdout: <%v>, stderr: <%v>, err: <%v>", stdout, stderr, err)
return nil
}
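
The embedded shell loop gives the probe up to five attempts with a 4-second connect timeout each and a 1-second pause between tries, exiting 0 on the first success and 1 if every attempt fails. A usage sketch with hypothetical Pod and server names:

	// Expect client-a (container client-a) to reach the target on TCP 80 within
	// the retry budget; a non-nil error means all five attempts failed.
	if err := data.runNetcatCommandFromPod("web", "client-a", "client-a", "web-service", 80); err != nil {
		t.Fatalf("expected TCP 80 to be reachable: %v", err)
	}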

func applyYAML(filename string, ns string) error {
cmd := fmt.Sprintf("kubectl apply -f %s -n %s", filename, ns)
if ns == "" {
cmd = fmt.Sprintf("kubectl apply -f %s", filename)
}
var stdout, stderr bytes.Buffer
command := exec.Command("bash", "-c", cmd)
log.Printf("Applying YAML file %s", cmd)
log.Printf("Applying YAML file %s", filename)
command.Stdout = &stdout
command.Stderr = &stderr
err := command.Run()
if err != nil {
log.Printf("Error when applying YAML file %s: %v", filename, err)
return err
}
outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes())
log.Printf("stdout: %s, stderr: %s", outStr, errStr)
log.Printf("YAML file %s applied with output: '%s' and error: '%s'", cmd, outStr, errStr)
return nil
}

func runCommand(cmd string) (string, error) {
err := wait.Poll(1*time.Second, defaultTimeout, func() (bool, error) {
var stdout, stderr bytes.Buffer
command := exec.Command("bash", "-c", cmd)
log.Printf("Running command %s", cmd)
command.Stdout = &stdout
command.Stderr = &stderr
err := command.Run()
if err != nil {
log.Printf("Error when running command %s: %v", cmd, err)
return false, nil
}
outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes())
log.Printf("Command %s returned with output: '%s' and error: '%s'", cmd, outStr, errStr)
if errStr != "" {
return false, nil
}
return true, nil
})
return "", err
}
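
Note that runCommand retries the whole command until it both exits zero and writes nothing to stderr, discards stdout, and always returns an empty string, so callers should rely on the returned error alone. A sketch with a hypothetical probe:

	// Poll (up to defaultTimeout) until the namespace is visible via kubectl.
	if _, err := runCommand("kubectl get ns test-ns"); err != nil {
		t.Fatalf("namespace never became visible: %v", err)
	}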

func deleteYAML(filename string, ns string) error {
cmd := fmt.Sprintf("kubectl delete -f %s -n %s", filename, ns)
if ns == "" {
cmd = fmt.Sprintf("kubectl delete -f %s", filename)
}
var stdout, stderr bytes.Buffer
command := exec.Command("bash", "-c", cmd)
log.Printf("Deleting YAML file (%s)", filename)
command.Stdout = &stdout
command.Stderr = &stderr
err := command.Run()
if err != nil {
log.Printf("Error when deleting YAML file %s: %v", filename, err)
return err
}
outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes())
log.Printf("YAML file %s deleted with output: '%s' and error: '%s'", cmd, outStr, errStr)
return nil
}

func (data *TestData) waitForResourceExistOrNot(namespace string, resourceType string, resourceName string, shouldExist bool) error {
err := wait.Poll(1*time.Second, defaultTimeout, func() (bool, error) {
exist := true
tagScopeClusterKey := strings.Replace(common.TagScopeNamespace, "/", "\\/", -1)
tagScopeClusterValue := strings.Replace(namespace, ":", "\\:", -1)
tagParam := fmt.Sprintf("tags.scope:%s AND tags.tag:%s", tagScopeClusterKey, tagScopeClusterValue)
resourceParam := fmt.Sprintf("%s:%s AND display_name:*%s*", common.ResourceType, resourceType, resourceName)
queryParam := resourceParam + " AND " + tagParam
var cursor *string = nil
var pageSize int64 = 200
response, err := testData.nsxClient.QueryClient.List(queryParam, cursor, nil, &pageSize, nil, nil)
if err != nil {
log.Printf("Error when querying resource %s/%s: %v", resourceType, resourceName, err)
return false, err
}
if len(response.Results) == 0 {
exist = false
}
//log.Printf("QueryParam: %s Result: %t", queryParam, exist)
if exist != shouldExist {
return false, nil
}
return true, nil
})
return err
}
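
A usage sketch (resource type and names are hypothetical, borrowed from the manifests below): after applying a SecurityPolicy CR, assert that an NSX object with a matching display_name and namespace tag appears, and after deleting the CR that it disappears.

	// Poll the NSX search API until a SecurityPolicy whose display_name
	// contains "isolate-policy-1" is tagged with namespace "test-ns"...
	if err := data.waitForResourceExistOrNot("test-ns", "SecurityPolicy", "isolate-policy-1", true); err != nil {
		t.Fatalf("NSX resource was not created: %v", err)
	}
	// ...and, once the CR is deleted, until no such object remains.
	if err := data.waitForResourceExistOrNot("test-ns", "SecurityPolicy", "isolate-policy-1", false); err != nil {
		t.Fatalf("NSX resource was not removed: %v", err)
	}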
5 changes: 4 additions & 1 deletion test/e2e/main_test.go
@@ -1,3 +1,6 @@
// e2e test for nsx-operator; from the nsx_operator directory, run the command below:
// e2e=true go test -v github.com/vmware-tanzu/nsx-operator/test/e2e -remote.sshconfig /root/.ssh/config -remote.kubeconfig /root/.kube/config -operator-cfg-path /etc/nsx-ujo/ncp.ini -test.timeout 15m
// Note: set a reasonable timeout when running e2e tests, otherwise the test will be terminated by the framework.
package e2e

import (
@@ -46,7 +49,7 @@ func testMain(m *testing.M) int {
flag.StringVar(&testOptions.providerName, "provider", "remote", "K8s test cluster provider")
flag.StringVar(&testOptions.providerConfigPath, "provider-cfg-path", "", "Optional config file for provider")
flag.StringVar(&testOptions.logsExportDir, "logs-export-dir", "", "Export directory for test logs")
flag.StringVar(&testOptions.operatorConfigPath, "operator-cfg-path", "/etc/nsx-operator/nsxop.ini", "config file for operator")
flag.StringVar(&testOptions.operatorConfigPath, "operator-cfg-path", "/etc/nsx-ujo/ncp.ini", "config file for operator")
flag.BoolVar(&testOptions.logsExportOnSuccess, "logs-export-on-success", false, "Export logs even when a test is successful")
flag.BoolVar(&testOptions.withIPPool, "ippool", false, "Run tests include IPPool tests")
flag.Parse()
@@ -0,0 +1,46 @@
apiVersion: v1
kind: Pod
metadata:
name: client-a
labels:
k1: a1
k2: b1
user: internal
spec:
restartPolicy: Never
containers:
- name: client-a
image: "harbor-repo.vmware.com/dockerhub-proxy-cache/humanux/\
http_https_echo:latest"
command: ["/bin/bash"]
args: ["-c", "while true; do echo hello, world!; sleep 60; done"]
---
apiVersion: v1
kind: Pod
metadata:
name: client-b
labels:
k1: a1
k2: b4
user: internal
spec:
restartPolicy: Never
containers:
- name: client-b
image: "harbor-repo.vmware.com/dockerhub-proxy-cache/humanux/\
http_https_echo:latest"
command: ["/bin/bash"]
args: ["-c", "while true; do echo hello, world!; sleep 60; done"]
---
apiVersion: v1
kind: Pod
metadata:
name: pod-a
spec:
restartPolicy: Never
containers:
- name: pod-a
image: "harbor-repo.vmware.com/dockerhub-proxy-cache/humanux/\
http_https_echo:latest"
command: ["/bin/bash"]
args: ["-c", "while true; do echo hello, world!; sleep 60; done"]
16 changes: 16 additions & 0 deletions test/e2e/manifest/testSecurityPolicy/match-expression.yaml
@@ -0,0 +1,16 @@
apiVersion: nsx.vmware.com/v1alpha1
kind: SecurityPolicy
metadata:
name: expression-policy-1
namespace: test-security-policy-match-expression
spec:
appliedTo:
- podSelector:
matchLabels:
user: internal
matchExpressions:
- {key: k2, operator: NotIn, values: [b1,b2,b3]}
- {key: k1, operator: In, values: [a1,a2,a3]}
rules:
- direction: Out
action: drop
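
Assuming these fields carry standard Kubernetes label-selector semantics, the policy applies to Pods with user=internal, k1 in {a1,a2,a3}, and k2 not in {b1,b2,b3}: of the Pods in the manifest above, client-b (k2=b4) is selected while client-a (k2=b1) is not. A small standalone check of that reading with apimachinery:

	package main

	import (
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/labels"
	)

	func main() {
		// The selector from expression-policy-1, rebuilt with apimachinery types.
		sel := &metav1.LabelSelector{
			MatchLabels: map[string]string{"user": "internal"},
			MatchExpressions: []metav1.LabelSelectorRequirement{
				{Key: "k2", Operator: metav1.LabelSelectorOpNotIn, Values: []string{"b1", "b2", "b3"}},
				{Key: "k1", Operator: metav1.LabelSelectorOpIn, Values: []string{"a1", "a2", "a3"}},
			},
		}
		s, err := metav1.LabelSelectorAsSelector(sel)
		if err != nil {
			panic(err)
		}
		fmt.Println(s.Matches(labels.Set{"user": "internal", "k1": "a1", "k2": "b1"})) // client-a: false
		fmt.Println(s.Matches(labels.Set{"user": "internal", "k1": "a1", "k2": "b4"})) // client-b: true
	}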
12 changes: 12 additions & 0 deletions test/e2e/manifest/testSecurityPolicy/ns-isolation-policy-1.yaml
@@ -0,0 +1,12 @@
apiVersion: nsx.vmware.com/v1alpha1
kind: SecurityPolicy
metadata:
name: isolate-policy-1
spec:
priority: 20
appliedTo:
- podSelector: {}
- vmSelector: {}
rules:
- direction: in
action: drop
@@ -0,0 +1,62 @@
# test: kubectl exec -ti client -n client -- curl web ip
apiVersion: v1
kind: Pod
metadata:
name: client
labels:
role: client
namespace: client
spec:
containers:
- name: client
image: "harbor-repo.vmware.com/dockerhub-proxy-cache/library/photon:3.0-20210507"
command: [ "/bin/bash" ]
args: [ "-c", "while true; do echo hello, world!; sleep 60; done" ]
restartPolicy: Never
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tcp-deployment
namespace: web
spec:
replicas: 2
selector:
matchLabels:
role: web
template:
metadata:
labels:
deployment: tcp-deployment
role: web
spec:
hostname: web-deployment
containers:
- name: web
image: "harbor-repo.vmware.com/dockerhub-proxy-cache/humanux/http_https_echo:latest"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: web-port
---
apiVersion: nsx.vmware.com/v1alpha1
kind: SecurityPolicy
metadata:
name: named-port-policy
namespace: web
spec:
priority: 10
appliedTo:
- podSelector:
matchLabels:
role: web
rules:
- direction: in
action: allow
ports:
- protocol: TCP
port: web-port
- direction: in
action: drop
- direction: out
action: drop
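
Net effect of named-port-policy: ingress to the web Pods is allowed only on the container port named web-port (80/TCP), any other ingress is dropped, and egress from those Pods is dropped. A hedged sketch of the resulting assertions using the framework helpers above (webPodIP is a hypothetical address obtained from deploymentWaitForIPsOrNames; Pod names follow this manifest):

	// TCP 80 resolves from the named port web-port and is admitted...
	if err := data.runNetcatCommandFromPod("client", "client", "client", webPodIP, 80); err != nil {
		t.Fatalf("expected 80/TCP to be allowed by named-port-policy: %v", err)
	}
	// ...while any other port falls through to the ingress drop rule.
	if err := data.runNetcatCommandFromPod("client", "client", "client", webPodIP, 443); err == nil {
		t.Fatal("expected 443/TCP to be dropped")
	}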