From 3693512cd927f5918a427b5b6fad52f08c37d535 Mon Sep 17 00:00:00 2001
From: Amir Moualem
Date: Tue, 3 Dec 2019 12:32:36 +0200
Subject: [PATCH 1/4] chore: import in proper typescript convention

---
 test/unit/metadata-extractor.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/unit/metadata-extractor.test.ts b/test/unit/metadata-extractor.test.ts
index 91345d7a4..a4d4904fc 100644
--- a/test/unit/metadata-extractor.test.ts
+++ b/test/unit/metadata-extractor.test.ts
@@ -2,7 +2,7 @@ import * as tap from 'tap';
 
 import { V1OwnerReference, V1Pod } from '@kubernetes/client-node';
 
-import metadataExtractor = require('../../src/kube-scanner/metadata-extractor');
+import * as metadataExtractor from '../../src/kube-scanner/metadata-extractor';
 
 tap.test('isPodAssociatedWithParent', async (t) => {
   const mockPodWithoutMetadata = {};

From be9b3b93b03cdad45e1900554b28bc16ffbee019 Mon Sep 17 00:00:00 2001
From: Amir Moualem
Date: Tue, 3 Dec 2019 14:22:26 +0200
Subject: [PATCH 2/4] chore: rename KubeObjectMetadata to IKubeObjectMetadata

the interface naming convention we use is to start interface names with
a capital I.
---
 src/kube-scanner/metadata-extractor.ts         | 12 ++++++------
 src/kube-scanner/types.ts                      |  2 +-
 src/kube-scanner/watchers/handlers/workload.ts |  4 ++--
 src/kube-scanner/workload-reader.ts            |  4 ++--
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/kube-scanner/metadata-extractor.ts b/src/kube-scanner/metadata-extractor.ts
index df2122fd6..ea945aab2 100644
--- a/src/kube-scanner/metadata-extractor.ts
+++ b/src/kube-scanner/metadata-extractor.ts
@@ -1,7 +1,7 @@
 import { V1OwnerReference, V1Pod, V1Container, V1ContainerStatus } from '@kubernetes/client-node';
 import { IWorkload, ILocalWorkloadLocator } from '../transmitter/types';
 import { currentClusterName } from './cluster';
-import { KubeObjectMetadata } from './types';
+import { IKubeObjectMetadata } from './types';
 import { getSupportedWorkload, getWorkloadReader } from './workload-reader';
 import logger = require('../common/logger');
 
@@ -10,7 +10,7 @@ const loopingThreshold = 20;
 // Constructs the workload metadata based on a variety of k8s properties.
 // https://www.notion.so/snyk/Kubernetes-workload-fields-we-should-collect-c60c8f0395f241978282173f4c133a34
 export function buildImageMetadata(
-  workloadMeta: KubeObjectMetadata,
+  workloadMeta: IKubeObjectMetadata,
   containerStatuses: V1ContainerStatus[],
 ): IWorkload[] {
   const { kind, objectMeta, specMeta, revision, podSpec } = workloadMeta;
@@ -49,9 +49,9 @@ export function buildImageMetadata(
 async function findParentWorkload(
   ownerRefs: V1OwnerReference[] | undefined,
   namespace: string,
-): Promise<KubeObjectMetadata | undefined> {
+): Promise<IKubeObjectMetadata | undefined> {
   let ownerReferences = ownerRefs;
-  let parentMetadata: KubeObjectMetadata | undefined;
+  let parentMetadata: IKubeObjectMetadata | undefined;
 
   for (let i = 0; i < loopingThreshold; i++) {
     // We are interested only in a subset of all workloads.
@@ -76,7 +76,7 @@ async function findParentWorkload(
   return undefined;
 }
 
-export function buildWorkloadMetadata(kubernetesMetadata: KubeObjectMetadata): ILocalWorkloadLocator {
+export function buildWorkloadMetadata(kubernetesMetadata: IKubeObjectMetadata): ILocalWorkloadLocator {
   if (!kubernetesMetadata.objectMeta
       || kubernetesMetadata.objectMeta.namespace === undefined
       || kubernetesMetadata.objectMeta.name === undefined) {
@@ -126,7 +126,7 @@ export async function buildMetadataForWorkload(pod: V1Pod): Promise<IWorkload[] | undefined> {
-export async function deleteWorkload(kubernetesMetadata: KubeObjectMetadata, workloadName: string): Promise<void> {
+export async function deleteWorkload(kubernetesMetadata: IKubeObjectMetadata, workloadName: string): Promise<void> {
   try {
     if (kubernetesMetadata.ownerRefs !== undefined && kubernetesMetadata.ownerRefs.length > 0) {
       return;
diff --git a/src/kube-scanner/workload-reader.ts b/src/kube-scanner/workload-reader.ts
index dad842caa..b68b4ee15 100644
--- a/src/kube-scanner/workload-reader.ts
+++ b/src/kube-scanner/workload-reader.ts
@@ -1,11 +1,11 @@
 import { V1OwnerReference } from '@kubernetes/client-node';
 import { k8sApi } from './cluster';
-import { KubeObjectMetadata, WorkloadKind } from './types';
+import { IKubeObjectMetadata, WorkloadKind } from './types';
 
 type IWorkloadReaderFunc = (
   workloadName: string,
   namespace: string,
-) => Promise<KubeObjectMetadata | undefined>;
+) => Promise<IKubeObjectMetadata | undefined>;
 
 const deploymentReader: IWorkloadReaderFunc = async (workloadName, namespace) => {
   const deploymentResult = await k8sApi.appsClient.readNamespacedDeployment(

From 46050fdd7259b319ae8437f6085f579c6ecde5cf Mon Sep 17 00:00:00 2001
From: Amir Moualem
Date: Tue, 3 Dec 2019 14:40:49 +0200
Subject: [PATCH 3/4] test: discrepancies between owner spec and child statuses

metadataExtractor.buildImageMetadata accepts a workload's metadata and a
list of container statuses belonging to the containers of a pod running
that workload.

the function builds image metadata from the owner's spec (for example a
deployment), which describes "what should be", and from the pod's
container statuses, which describe "what actually is".

discrepancies can occur between "what should be" and "what actually is".
one example we've stumbled upon happens when sidecar containers are
injected by an admission controller: the deployment's spec contains a
single container, but the statuses include the injected containers as
well. this causes an error in buildImageMetadata, which relies on every
container that appears in the statuses list also appearing in the spec.

this test proves the bug exists.
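
for illustration only, a minimal sketch of the failure mode, mirroring
the containerNameToSpec lookup in buildImageMetadata (values taken from
the fixtures added in this commit; this is not the production code):

    // the spec knows only about the single declared container
    const containerNameToSpec: { [name: string]: { image: string } } = {
      'hello-world': { image: 'eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980' },
    };
    // the pod reports the declared container plus the injected sidecar
    const containerStatuses = [{ name: 'hello-world' }, { name: 'istio-proxy' }];
    const images = containerStatuses.map(({ name }) => ({
      containerName: name,
      imageName: containerNameToSpec[name].image, // TypeError for 'istio-proxy'
    }));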
--- .../sidecar-containers/deployment.yaml | 95 ++++++ test/fixtures/sidecar-containers/pod.yaml | 322 ++++++++++++++++++ test/unit/metadata-extractor.test.ts | 25 +- 3 files changed, 441 insertions(+), 1 deletion(-) create mode 100644 test/fixtures/sidecar-containers/deployment.yaml create mode 100644 test/fixtures/sidecar-containers/pod.yaml diff --git a/test/fixtures/sidecar-containers/deployment.yaml b/test/fixtures/sidecar-containers/deployment.yaml new file mode 100644 index 000000000..ba4980a79 --- /dev/null +++ b/test/fixtures/sidecar-containers/deployment.yaml @@ -0,0 +1,95 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + flux.weave.works/antecedent: security-tools:helmrelease/hello-world + creationTimestamp: "2019-11-25T13:23:51Z" + generation: 2 + labels: + app: hello-world + name: hello-world + namespace: security-tools + resourceVersion: "55787967" + selfLink: /apis/extensions/v1beta1/namespaces/security-tools/deployments/hello-world + uid: d2006330-0f86-11ea-ae05-4201c0a88014 +spec: + progressDeadlineSeconds: 2147483647 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + json_logs: "true" + prometheus.io/scrape: "false" + creationTimestamp: null + labels: + app: hello-world + spec: + containers: + - image: eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /hello + port: 8080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + name: hello-world + ports: + - containerPort: 8080 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /hello + port: 8080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + resources: + limits: + cpu: "2" + memory: 512Mi + requests: + cpu: "1" + memory: 128Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 40500 + runAsUser: 40500 + serviceAccount: hello-world + serviceAccountName: hello-world + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2019-11-25T13:23:51Z" + lastUpdateTime: "2019-11-25T13:23:51Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/test/fixtures/sidecar-containers/pod.yaml b/test/fixtures/sidecar-containers/pod.yaml new file mode 100644 index 000000000..07f25f3a8 --- /dev/null +++ b/test/fixtures/sidecar-containers/pod.yaml @@ -0,0 +1,322 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + json_logs: "true" + prometheus.io/scrape: "false" + sidecar.istio.io/status: '{"version":"8e87d1a416a399be3d9fcf1451585bc0f8cc55e28ea8dc3367a0a104473561ef","initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-certs"],"imagePullSecrets":null}' + creationTimestamp: "2019-11-25T13:23:51Z" + finalizers: + - finalizers.gatekeeper.sh/sync + generateName: hello-world-69df7cfb84- + labels: + app: hello-world + pod-template-hash: 69df7cfb84 + name: hello-world-69df7cfb84-whpp5 + namespace: security-tools + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: hello-world-69df7cfb84 + uid: d2022c91-0f86-11ea-86a6-4201c0a8801a + resourceVersion: "55787962" + selfLink: /api/v1/namespaces/security-tools/pods/hello-world-69df7cfb84-whpp5 + uid: d208d970-0f86-11ea-86a6-4201c0a8801a +spec: + containers: + - image: eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /hello + port: 8080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + name: hello-world + ports: + - containerPort: 8080 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /hello + port: 8080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + resources: + limits: + cpu: "2" + memory: 512Mi + requests: + cpu: "1" + memory: 128Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: hello-world-token-dltwl + readOnly: true + - args: + - proxy + - sidecar + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --configPath + - /etc/istio/proxy + - --binaryPath + - /usr/local/bin/envoy + - --serviceCluster + - hello-world.$(POD_NAMESPACE) + - --drainDuration + - 45s + - --parentShutdownDuration + - 1m0s + - --discoveryAddress + - istio-pilot.istio-system:15010 + - --zipkinAddress + - zipkin.istio-system:9411 + - --dnsRefreshRate + - 300s + - --connectTimeout + - 10s + - --proxyAdminPort + - "15000" + - --concurrency + - "2" + - --controlPlaneAuthPolicy + - NONE + - --statusPort + - "15020" + - --applicationPorts + - "8080" + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_POD_PORTS + value: |- + [ + {"name":"http","containerPort":8080,"protocol":"TCP"} + ] + - name: ISTIO_META_CLUSTER_ID + value: Kubernetes + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: SDS_ENABLED + 
value: "false" + - name: ISTIO_META_INTERCEPTION_MODE + value: REDIRECT + - name: ISTIO_META_INCLUDE_INBOUND_PORTS + value: "8080" + - name: ISTIO_METAJSON_ANNOTATIONS + value: | + {"json_logs":"true","prometheus.io/scrape":"false"} + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"hello-world","pod-template-hash":"69df7cfb84"} + - name: ISTIO_META_WORKLOAD_NAME + value: hello-world + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/security-tools/deployments/hello-world + image: docker.io/istio/proxyv2:1.3.4 + imagePullPolicy: IfNotPresent + name: istio-proxy + ports: + - containerPort: 15090 + name: http-envoy-prom + protocol: TCP + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: "2" + memory: 1Gi + requests: + cpu: 100m + memory: 128Mi + securityContext: + readOnlyRootFilesystem: true + runAsUser: 1337 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/istio/proxy + name: istio-envoy + - mountPath: /etc/certs/ + name: istio-certs + readOnly: true + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: hello-world-token-dltwl + readOnly: true + dnsPolicy: ClusterFirst + enableServiceLinks: true + initContainers: + - args: + - -p + - "15001" + - -z + - "15006" + - -u + - "1337" + - -m + - REDIRECT + - -i + - '*' + - -x + - "" + - -b + - '*' + - -d + - "15020" + image: docker.io/istio/proxy_init:1.3.4 + imagePullPolicy: IfNotPresent + name: istio-init + resources: + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 10m + memory: 10Mi + securityContext: + capabilities: + add: + - NET_ADMIN + runAsNonRoot: false + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + nodeName: gke-staging-1-n1-standard-32-93d1d8b9-6t57 + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 40500 + runAsUser: 40500 + serviceAccount: hello-world + serviceAccountName: hello-world + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: hello-world-token-dltwl + secret: + defaultMode: 420 + secretName: hello-world-token-dltwl + - emptyDir: + medium: Memory + name: istio-envoy + - name: istio-certs + secret: + defaultMode: 420 + optional: true + secretName: istio.hello-world +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2019-11-25T13:23:54Z" + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: "2019-11-25T13:24:10Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: "2019-11-25T13:24:10Z" + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: "2019-11-25T13:23:51Z" + status: "True" + type: PodScheduled + containerStatuses: + - containerID: docker://475bb2de6388165714fcfe0ee0c3c6270a9aafef39c035db56d9a854e7747671 + image: eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980 + imageID: docker-pullable://eu.gcr.io/cookie/hello-world@sha256:1ac413b2756364b7b856c64d557fdedb97a4ba44ca16fc656e08881650848fe2 + lastState: {} + name: hello-world + ready: true + restartCount: 0 + state: + running: + startedAt: "2019-11-25T13:24:02Z" + - 
containerID: docker://818d8c0c4a4d88c1ce7865f5cbb1dddb237d1bf2392058f402956db33d1187f7 + image: istio/proxyv2:1.3.4 + imageID: docker-pullable://istio/proxyv2@sha256:54a563895566adc0ad5ac5be90f666cae204e3bb09a97d14616eedaf3154d9b6 + lastState: {} + name: istio-proxy + ready: true + restartCount: 0 + state: + running: + startedAt: "2019-11-25T13:24:02Z" + hostIP: 10.64.128.234 + initContainerStatuses: + - containerID: docker://28e26004a22f227b5c66123da3ff76160f886dd4114f8d05f912970883ce094c + image: istio/proxy_init:1.3.4 + imageID: docker-pullable://istio/proxy_init@sha256:181215c809b0d294e456cf390990045efbda05fcfdc910bacdc3847f96a7e3b6 + lastState: {} + name: istio-init + ready: true + restartCount: 0 + state: + terminated: + containerID: docker://28e26004a22f227b5c66123da3ff76160f886dd4114f8d05f912970883ce094c + exitCode: 0 + finishedAt: "2019-11-25T13:23:53Z" + reason: Completed + startedAt: "2019-11-25T13:23:52Z" + phase: Running + podIP: 10.64.2.7 + qosClass: Burstable + startTime: "2019-11-25T13:23:51Z" diff --git a/test/unit/metadata-extractor.test.ts b/test/unit/metadata-extractor.test.ts index a4d4904fc..e075e8161 100644 --- a/test/unit/metadata-extractor.test.ts +++ b/test/unit/metadata-extractor.test.ts @@ -1,6 +1,9 @@ import * as tap from 'tap'; +import * as fs from 'fs'; +import * as YAML from 'yaml'; -import { V1OwnerReference, V1Pod } from '@kubernetes/client-node'; +import { V1OwnerReference, V1Pod, V1Deployment } from '@kubernetes/client-node'; +import * as scannerTypes from '../../src/kube-scanner/types'; import * as metadataExtractor from '../../src/kube-scanner/metadata-extractor'; @@ -54,3 +57,23 @@ tap.test('isPodAssociatedWithParent', async (t) => { t.ok(isPodWithMixedOwnerReferencesAssociatedWithParent, 'pod with some owner references with kind is associated with parent'); }); + +tap.test('buildImageMetadata', async (t) => { + const deploymentFixture = fs.readFileSync('./test/fixtures/sidecar-containers/deployment.yaml', 'utf8'); + const deploymentObject: V1Deployment = YAML.parse(deploymentFixture); + const podFixture = fs.readFileSync('./test/fixtures/sidecar-containers/pod.yaml', 'utf8'); + const podObject: V1Pod = YAML.parse(podFixture); + + const deploymentWeirdWrapper: scannerTypes.IKubeObjectMetadata = { + kind: 'Deployment', + objectMeta: deploymentObject.metadata!, + specMeta: deploymentObject.spec!.template.metadata!, + ownerRefs: deploymentObject.metadata!.ownerReferences, + podSpec: deploymentObject.spec!.template.spec!, + } + + t.throws(() => metadataExtractor.buildImageMetadata( + deploymentWeirdWrapper, + podObject.status!.containerStatuses!, + ), 'buildImageMetadata can\'t handle discrepancies between spec and status'); +}); From 79689b0fbd31330acbbdc7c9ca3048a64cd8a58f Mon Sep 17 00:00:00 2001 From: Amir Moualem Date: Wed, 4 Dec 2019 11:40:21 +0200 Subject: [PATCH 4/4] fix: buildImageMetadata when containers miss from the spec this commit gives up supporting sidecar containers injected dynamically by collecting metadata for images only if they appear in both the spec and the status --- src/kube-scanner/metadata-extractor.ts | 18 ++++++++++++------ test/unit/metadata-extractor.test.ts | 22 ++++++++++++++++++++-- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/src/kube-scanner/metadata-extractor.ts b/src/kube-scanner/metadata-extractor.ts index ea945aab2..065f5e979 100644 --- a/src/kube-scanner/metadata-extractor.ts +++ b/src/kube-scanner/metadata-extractor.ts @@ -26,7 +26,12 @@ export function buildImageMetadata( 
containerNameToStatus[containerStatus.name] = containerStatus; } - const images = containerStatuses.map(({ name: containerName }) => ({ + const images: IWorkload[] = []; + for (const containerStatus of containerStatuses) { + if (!(containerStatus.name in containerNameToSpec)) { + continue + } + images.push({ type: kind, name: name || 'unknown', namespace, @@ -35,14 +40,15 @@ export function buildImageMetadata( uid, specLabels: specMeta.labels || {}, specAnnotations: specMeta.annotations || {}, - containerName, - imageName: containerNameToSpec[containerName].image, - imageId: containerNameToStatus[containerName].imageID, + containerName: containerStatus.name, + imageName: containerNameToSpec[containerStatus.name].image, + imageId: containerNameToStatus[containerStatus.name].imageID, cluster: currentClusterName, revision, podSpec, - } as IWorkload), - ); + } as IWorkload); + } + return images; } diff --git a/test/unit/metadata-extractor.test.ts b/test/unit/metadata-extractor.test.ts index e075e8161..d4841152d 100644 --- a/test/unit/metadata-extractor.test.ts +++ b/test/unit/metadata-extractor.test.ts @@ -72,8 +72,26 @@ tap.test('buildImageMetadata', async (t) => { podSpec: deploymentObject.spec!.template.spec!, } - t.throws(() => metadataExtractor.buildImageMetadata( + const imageMetadataResult = metadataExtractor.buildImageMetadata( deploymentWeirdWrapper, podObject.status!.containerStatuses!, - ), 'buildImageMetadata can\'t handle discrepancies between spec and status'); + ); + + t.ok(Array.isArray(imageMetadataResult), 'returns an array'); + t.equals( + imageMetadataResult.length, + 1, + 'the size of the container status array that also appears in the spec', + ); + t.equals(imageMetadataResult[0].type, 'Deployment', 'with the workload type of the parent'); + t.equals( + imageMetadataResult[0].imageId, + 'docker-pullable://eu.gcr.io/cookie/hello-world@sha256:1ac413b2756364b7b856c64d557fdedb97a4ba44ca16fc656e08881650848fe2', + 'the image ID of the first container' + ); + t.equals( + imageMetadataResult[0].imageName, + 'eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980', + 'the image name of the first container' + ); });
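
as a closing note on patch 4, a sketch of an equivalent way to express the
fix (illustration only; the patch itself uses an explicit loop with
continue so it can keep pushing IWorkload objects): statuses whose
container name is missing from the spec are skipped instead of
dereferenced, so injected sidecars no longer break metadata collection.

    // illustrative values taken from the sidecar-containers fixtures
    const containerNameToSpec: { [name: string]: { image: string } } = {
      'hello-world': { image: 'eu.gcr.io/cookie/hello-world:1.20191125.132107-4664980' },
    };
    const containerStatuses = [
      { name: 'hello-world', imageID: 'docker-pullable://eu.gcr.io/cookie/hello-world@sha256:1ac413b2756364b7b856c64d557fdedb97a4ba44ca16fc656e08881650848fe2' },
      { name: 'istio-proxy', imageID: 'docker-pullable://istio/proxyv2@sha256:54a563895566adc0ad5ac5be90f666cae204e3bb09a97d14616eedaf3154d9b6' },
    ];
    const images = containerStatuses
      .filter((status) => status.name in containerNameToSpec)
      .map((status) => ({
        containerName: status.name,
        imageName: containerNameToSpec[status.name].image,
        imageId: status.imageID,
      }));
    // images.length === 1 -- only the hello-world container survives,
    // matching the assertions in the updated test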