From 591116ea0192a95734df7f12bf67e8657c7dcad3 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Mon, 11 Dec 2023 19:34:36 +0100 Subject: [PATCH 1/6] new(plugins): introduce new `k8smeta` plugin Signed-off-by: Andrea Terzolo --- plugins/k8smeta/.gitignore | 6 + plugins/k8smeta/CMakeLists.txt | 72 + plugins/k8smeta/Makefile | 29 + plugins/k8smeta/README.md | 79 + plugins/k8smeta/cmake/modules/grpc.cmake | 35 + .../cmake/modules/k8s-metacollector.cmake | 14 + plugins/k8smeta/cmake/modules/libs.cmake | 11 + .../cmake/modules/plugin-sdk-cpp.cmake | 13 + plugins/k8smeta/cmake/modules/spdlog.cmake | 11 + plugins/k8smeta/src/grpc_client.cpp | 224 +++ plugins/k8smeta/src/grpc_client.h | 60 + plugins/k8smeta/src/plugin.cpp | 1289 +++++++++++++++++ plugins/k8smeta/src/plugin.h | 276 ++++ plugins/k8smeta/src/plugin_only_consts.h | 44 + .../k8smeta/src/shared_with_tests_consts.h | 101 ++ plugins/k8smeta/test/CMakeLists.txt | 61 + plugins/k8smeta/test/README.md | 12 + .../test/include/k8smeta_tests/helpers.h | 61 + plugins/k8smeta/test/plugin_test_var.h.in | 4 + plugins/k8smeta/test/rules/example_rule.yaml | 6 + plugins/k8smeta/test/src/check_events.cpp | 686 +++++++++ plugins/k8smeta/test/src/init_config.cpp | 94 ++ plugins/k8smeta/test/src/parsing_pod.cpp | 336 +++++ 23 files changed, 3524 insertions(+) create mode 100644 plugins/k8smeta/.gitignore create mode 100644 plugins/k8smeta/CMakeLists.txt create mode 100644 plugins/k8smeta/Makefile create mode 100644 plugins/k8smeta/README.md create mode 100644 plugins/k8smeta/cmake/modules/grpc.cmake create mode 100644 plugins/k8smeta/cmake/modules/k8s-metacollector.cmake create mode 100644 plugins/k8smeta/cmake/modules/libs.cmake create mode 100644 plugins/k8smeta/cmake/modules/plugin-sdk-cpp.cmake create mode 100644 plugins/k8smeta/cmake/modules/spdlog.cmake create mode 100644 plugins/k8smeta/src/grpc_client.cpp create mode 100644 plugins/k8smeta/src/grpc_client.h create mode 100644 plugins/k8smeta/src/plugin.cpp create mode 
100644 plugins/k8smeta/src/plugin.h create mode 100644 plugins/k8smeta/src/plugin_only_consts.h create mode 100644 plugins/k8smeta/src/shared_with_tests_consts.h create mode 100644 plugins/k8smeta/test/CMakeLists.txt create mode 100644 plugins/k8smeta/test/README.md create mode 100644 plugins/k8smeta/test/include/k8smeta_tests/helpers.h create mode 100644 plugins/k8smeta/test/plugin_test_var.h.in create mode 100644 plugins/k8smeta/test/rules/example_rule.yaml create mode 100644 plugins/k8smeta/test/src/check_events.cpp create mode 100644 plugins/k8smeta/test/src/init_config.cpp create mode 100644 plugins/k8smeta/test/src/parsing_pod.cpp diff --git a/plugins/k8smeta/.gitignore b/plugins/k8smeta/.gitignore new file mode 100644 index 00000000..e6d1f1f0 --- /dev/null +++ b/plugins/k8smeta/.gitignore @@ -0,0 +1,6 @@ +*.so +*.a +*.o +.vscode +build* +libk8smeta.so diff --git a/plugins/k8smeta/CMakeLists.txt b/plugins/k8smeta/CMakeLists.txt new file mode 100644 index 00000000..8c9440ac --- /dev/null +++ b/plugins/k8smeta/CMakeLists.txt @@ -0,0 +1,72 @@ +cmake_minimum_required(VERSION 3.22) + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") + +option(BUILD_TESTS "Enable test" ON) + +# project metadata +project( + k8smeta + VERSION 0.1.0 + DESCRIPTION "Falco Kubernetes enrichment Plugin" + LANGUAGES CXX) + +# dependencies +include(FetchContent) +include(grpc) +include(spdlog) +include(plugin-sdk-cpp) +include(k8s-metacollector) + +set(PROTO_PATH "${K8S_METACOLLECTOR_DIR}/metadata/metadata.proto") + +get_filename_component(meta_proto "${PROTO_PATH}" ABSOLUTE) +get_filename_component(meta_proto_path "${meta_proto}" PATH) + +# Generated sources +set(PROTO_GENERATED_INCLUDE "${CMAKE_BINARY_DIR}/generated") +if(NOT EXISTS "${PROTO_GENERATED_INCLUDE}") + file(MAKE_DIRECTORY "${PROTO_GENERATED_INCLUDE}") +endif() + +set(meta_proto_srcs "${PROTO_GENERATED_INCLUDE}/metadata.pb.cc") +set(meta_proto_hdrs "${PROTO_GENERATED_INCLUDE}/metadata.pb.h") 
+set(meta_grpc_srcs "${PROTO_GENERATED_INCLUDE}/metadata.grpc.pb.cc") +set(meta_grpc_hdrs "${PROTO_GENERATED_INCLUDE}/metadata.grpc.pb.h") +add_custom_command( + OUTPUT "${meta_proto_srcs}" "${meta_proto_hdrs}" "${meta_grpc_srcs}" + "${meta_grpc_hdrs}" + COMMAND + ${_PROTOBUF_PROTOC} ARGS --grpc_out "${PROTO_GENERATED_INCLUDE}" --cpp_out + "${PROTO_GENERATED_INCLUDE}" -I "${meta_proto_path}" + --plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}" "${meta_proto}" + DEPENDS "${meta_proto}") + +# project target +file(GLOB_RECURSE K8S_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp") +add_library(k8smeta SHARED ${K8S_SOURCES} ${meta_grpc_srcs} ${meta_grpc_hdrs} + ${meta_proto_srcs} ${meta_proto_hdrs}) +set_target_properties(k8smeta PROPERTIES CXX_EXTENSIONS OFF) + +# project compilation options +target_compile_options(k8smeta PRIVATE "-fPIC") +target_compile_options(k8smeta PRIVATE "-Wl,-z,relro,-z,now") +target_compile_options(k8smeta PRIVATE "-fstack-protector-strong") +# When compiling in Debug mode, this will define the DEBUG symbol for use in +# your code. +target_compile_options(k8smeta PUBLIC "$<$<CONFIG:Debug>:-DDEBUG>") +target_compile_features(k8smeta PUBLIC cxx_std_17) + +# project includes +target_include_directories( + k8smeta PRIVATE "${PLUGIN_SDK_INLCUDE}" "${PROTO_GENERATED_INCLUDE}" + "${SPDLOG_INLCUDE}") + +# project linked libraries +target_link_libraries(k8smeta ${_REFLECTION} ${_GRPC_GRPCPP} + ${_PROTOBUF_LIBPROTOBUF} re2::re2) + +# Testing +if(BUILD_TESTS) + add_subdirectory(test) +endif() diff --git a/plugins/k8smeta/Makefile b/plugins/k8smeta/Makefile new file mode 100644 index 00000000..e0668883 --- /dev/null +++ b/plugins/k8smeta/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2023 The Falco Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + + +NAME := k8smeta +OUTPUT := lib$(NAME).so + +all: $(OUTPUT) + +clean: + rm -rf build $(OUTPUT) + +# This Makefile requires CMake installed on the system +$(OUTPUT): + mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release ../ && make k8smeta -j6 && cp ./$(OUTPUT) ../$(OUTPUT) + +readme: + @$(READMETOOL) -p ./$(OUTPUT) -f README.md diff --git a/plugins/k8smeta/README.md b/plugins/k8smeta/README.md new file mode 100644 index 00000000..9186193b --- /dev/null +++ b/plugins/k8smeta/README.md @@ -0,0 +1,79 @@ +# Kubernetes Metadata Enrichment Plugin + +## Introduction + +This plugin enriches Falco syscall flow with Kubernetes Metadata coming from the API server. +The plugin uses a GRPC channel to communicate with a remote [collector](https://github.com/falcosecurity/k8s-metacollector). The collector is independent from the plugin and should be deployed as a separate component. The main role of the plugin is to associate each syscall with information about the pod in which it is generated.
+ +### Functionality + +TODO + +## Capabilities + +The `k8smeta` plugin implements these capabilities: +* `extraction` +* `parsing` +* `async` + +### Supported Fields + + +| NAME | TYPE | ARG | DESCRIPTION | +|----------------------------|-----------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `k8sres.pod.name` | `string` | None | Kubernetes pod name. | +| `k8sres.pod.id` | `string` | None | Kubernetes pod ID. | +| `k8sres.pod.label` | `string` | Key, Required | Kubernetes pod label. E.g. 'k8sres.pod.label[foo]'. | +| `k8sres.pod.labels` | `string (list)` | None | Kubernetes pod comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8sres.pod.ip` | `string` | None | Kubernetes pod ip | +| `k8sres.ns.name` | `string` | None | Kubernetes namespace name. | +| `k8sres.ns.id` | `string` | None | Kubernetes namespace ID. | +| `k8sres.ns.label` | `string` | Key, Required | Kubernetes namespace label. E.g. 'k8sres.ns.label[foo]'. | +| `k8sres.ns.labels` | `string (list)` | None | Kubernetes namespace comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8sres.deployment.name` | `string` | None | Kubernetes deployment name. | +| `k8sres.deployment.id` | `string` | None | Kubernetes deployment ID. | +| `k8sres.deployment.label` | `string` | Key, Required | Kubernetes deployment label. E.g. 'k8sres.rs.label[foo]'. | +| `k8sres.deployment.labels` | `string (list)` | None | Kubernetes deployment comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8sres.svc.name` | `string (list)` | None | Kubernetes services name. Return a list with all the names of the services associated with the current pod. E.g. 
'(service1,service2)' | +| `k8sres.svc.id` | `string (list)` | None | Kubernetes services ID. Return a list with all the IDs of the services associated with the current pod. E.g. '(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-25307c5007e8)' | +| `k8sres.svc.label` | `string (list)` | Key, Required | Kubernetes services label. If the services associated with the current pod have a label with this name, return the list of label's values. E.g. if the current pod has 2 services associated and both have the 'foo' label, 'k8sres.svc.label[foo]' will return '(service1-label-value,service2-label-value) | +| `k8sres.svc.labels` | `string (list)` | None | Kubernetes services labels. Return a list with all the comma-separated key/value labels of the services associated with the current pod. E.g. '(foo1:bar1,foo2:bar2)' | +| `k8sres.rs.name` | `string` | None | Kubernetes replica set name. | +| `k8sres.rs.id` | `string` | None | Kubernetes replica set ID. | +| `k8sres.rs.label` | `string` | Key, Required | Kubernetes replica set label. E.g. 'k8sres.rs.label[foo]'. | +| `k8sres.rs.labels` | `string (list)` | None | Kubernetes replica set comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8sres.rc.name` | `string` | None | Kubernetes replication controller name. | +| `k8sres.rc.id` | `string` | None | Kubernetes replication controller ID. | +| `k8sres.rc.label` | `string` | Key, Required | Kubernetes replication controller label. E.g. 'k8sres.rc.label[foo]'. | +| `k8sres.rc.labels` | `string (list)` | None | Kubernetes replication controller comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. 
| + + +## Usage + +### Configuration + +Here's an example of configuration of `falco.yaml`: + +```yaml +load_plugins: [k8smeta] + +plugins: + - name: k8smeta + library_path: libk8smeta.so + init_config: + collectorPort: 45000 + collectorHostname: localhost + nodename: kind-control-plane +``` + +**Initialization Config**: + +TODO + +**Open Parameters**: + +The plugin doesn't have open params + +### Rule Example + +To see how to use the plugin fields in a Falco rule check the example rule `/k8smeta/test/rules/example_rule.yaml`. diff --git a/plugins/k8smeta/cmake/modules/grpc.cmake b/plugins/k8smeta/cmake/modules/grpc.cmake new file mode 100644 index 00000000..c94dd56f --- /dev/null +++ b/plugins/k8smeta/cmake/modules/grpc.cmake @@ -0,0 +1,35 @@ +# This cmake module is adapted from the grpc repo: +# `examples/cpp/cmake/common.cmake` + +message(STATUS "Fetching grpc at 'https://github.com/grpc/grpc'") + +find_package(Threads REQUIRED) + +# See: +# https://github.com/protocolbuffers/protobuf/issues/12185#issuecomment-1594685860 +set(ABSL_ENABLE_INSTALL ON) + +# To solve: +# +# CMake Warning at build/_deps/grpc-src/third_party/abseil-cpp/CMakeLists.txt:77 +# (message): A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON +# for CMake 3.8 and up. 
We recommend enabling this option to ensure your +# project still builds correctly +set(ABSL_PROPAGATE_CXX_STD ON) + +FetchContent_Declare( + gRPC + GIT_REPOSITORY https://github.com/grpc/grpc + GIT_TAG v1.44.0 + GIT_PROGRESS TRUE) + +set(FETCHCONTENT_QUIET OFF) +FetchContent_MakeAvailable(gRPC) + +set(_PROTOBUF_LIBPROTOBUF libprotobuf) +set(_REFLECTION grpc++_reflection) +set(_PROTOBUF_PROTOC $<TARGET_FILE:protoc>) +set(_GRPC_GRPCPP grpc++) +set(_GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>) + +message(STATUS "Using grpc at '${gRPC_SOURCE_DIR}'") diff --git a/plugins/k8smeta/cmake/modules/k8s-metacollector.cmake b/plugins/k8smeta/cmake/modules/k8s-metacollector.cmake new file mode 100644 index 00000000..7d75a223 --- /dev/null +++ b/plugins/k8smeta/cmake/modules/k8s-metacollector.cmake @@ -0,0 +1,14 @@ +message( + STATUS + "Fetching k8s-metacollector at 'https://github.com/falcosecurity/k8s-metacollector.git'" +) + +# Download a non cmake project +FetchContent_Declare( + k8s-metacollector + GIT_REPOSITORY https://github.com/falcosecurity/k8s-metacollector.git + GIT_TAG 982c40ac128cc94557b98d81210cbb13e7825129 + CONFIGURE_COMMAND "" BUILD_COMMAND "") + +FetchContent_Populate(k8s-metacollector) +set(K8S_METACOLLECTOR_DIR "${k8s-metacollector_SOURCE_DIR}") diff --git a/plugins/k8smeta/cmake/modules/libs.cmake b/plugins/k8smeta/cmake/modules/libs.cmake new file mode 100644 index 00000000..995cd195 --- /dev/null +++ b/plugins/k8smeta/cmake/modules/libs.cmake @@ -0,0 +1,11 @@ +message(STATUS "Fetching libs at 'https://github.com/falcosecurity/libs.git'") + +# Just populate it we don't want to build it +FetchContent_Declare( + libs + GIT_REPOSITORY https://github.com/falcosecurity/libs.git + GIT_TAG 8fee2fb4791d50ec5ee4808e5ed235c8b1b309f3 + CONFIGURE_COMMAND "" BUILD_COMMAND "") + +FetchContent_Populate(libs) +set(LIBS_DIR "${libs_SOURCE_DIR}") diff --git a/plugins/k8smeta/cmake/modules/plugin-sdk-cpp.cmake b/plugins/k8smeta/cmake/modules/plugin-sdk-cpp.cmake new file mode 100644 index 00000000..4e737cbc
--- /dev/null +++ b/plugins/k8smeta/cmake/modules/plugin-sdk-cpp.cmake @@ -0,0 +1,13 @@ +message( + STATUS + "Fetching plugin-sdk-cpp at 'https://github.com/falcosecurity/plugin-sdk-cpp.git'" +) + +FetchContent_Declare( + plugin-sdk-cpp + GIT_REPOSITORY https://github.com/falcosecurity/plugin-sdk-cpp.git + GIT_TAG 2097bdb5a5d77f3f38162da1f438382912465340) + +FetchContent_MakeAvailable(plugin-sdk-cpp) +set(PLUGIN_SDK_INLCUDE "${plugin-sdk-cpp_SOURCE_DIR}/include") +message(STATUS "Using plugin-sdk-cpp include at '${PLUGIN_SDK_INLCUDE}'") diff --git a/plugins/k8smeta/cmake/modules/spdlog.cmake b/plugins/k8smeta/cmake/modules/spdlog.cmake new file mode 100644 index 00000000..941f0561 --- /dev/null +++ b/plugins/k8smeta/cmake/modules/spdlog.cmake @@ -0,0 +1,11 @@ +message(STATUS "Fetching spdlog at at 'https://github.com/gabime/spdlog'") + +# Header only library +FetchContent_Declare( + spdlog + GIT_REPOSITORY "https://github.com/gabime/spdlog.git" + GIT_TAG v1.12.0) + +FetchContent_MakeAvailable(spdlog) +set(SPDLOG_INLCUDE "${spdlog_SOURCE_DIR}/include") +message(STATUS "Using spdlog include at '${SPDLOG_INLCUDE}'") diff --git a/plugins/k8smeta/src/grpc_client.cpp b/plugins/k8smeta/src/grpc_client.cpp new file mode 100644 index 00000000..16ad6fdc --- /dev/null +++ b/plugins/k8smeta/src/grpc_client.cpp @@ -0,0 +1,224 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include "grpc_client.h" +#include "shared_with_tests_consts.h" + +#include +#include +#include +#include +#include + +using grpc::Channel; +using grpc::ClientContext; +using grpc::ClientReader; +using grpc::Status; +using metadata::Event; +using metadata::Selector; + +K8sMetaClient::K8sMetaClient(const std::string& nodename, + const std::string& ip_port, + const std::string& ca_PEM_encoding, std::mutex& mu, + std::condition_variable& cv, + std::atomic& thread_quit, + falcosecurity::async_event_handler& handler): + m_status_code(grpc::StatusCode::DO_NOT_USE), + m_cv(cv), m_mu(mu), m_async_thread_quit(thread_quit), + m_handler(handler), m_correctly_reading(0) +{ + metadata::Selector sel; + sel.set_nodename(nodename); + sel.clear_resourcekinds(); + + /// todo! one day we could expose them to the user. + (*sel.mutable_resourcekinds())["Pod"] = "true"; + (*sel.mutable_resourcekinds())["Namespace"] = "true"; + (*sel.mutable_resourcekinds())["Deployment"] = "true"; + (*sel.mutable_resourcekinds())["Service"] = "true"; + (*sel.mutable_resourcekinds())["ReplicaSet"] = "true"; + (*sel.mutable_resourcekinds())["ReplicaController"] = "true"; + + if(!ca_PEM_encoding.empty()) + { + m_stub = metadata::Metadata::NewStub(grpc::CreateChannel( + ip_port, grpc::SslCredentials(grpc::SslCredentialsOptions( + {ca_PEM_encoding, "", ""})))); + } + else + { + // We use an insecure channel + m_stub = metadata::Metadata::NewStub(grpc::CreateChannel( + ip_port, grpc::InsecureChannelCredentials())); + } + + m_stub->async()->Watch(&m_context, &sel, this); + StartRead(&m_event); + StartCall(); +} + +void K8sMetaClient::NotifyEnd(grpc::StatusCode c) +{ + std::unique_lock l(m_mu); + m_status_code = c; + m_cv.notify_one(); +} + +void K8sMetaClient::OnReadDone(bool ok) +{ + if(!ok) + { + // In case of failure we will call `OnDone` method + return; + } + + // Copy the JSON event into the string. 
+ std::string json_string; + google::protobuf::util::JsonPrintOptions options; + auto status = MessageToJsonString(m_event, &json_string, options); + if(!status.ok()) + { + SPDLOG_ERROR("cannot convert message to json: {}", status.ToString()); + NotifyEnd(grpc::StatusCode::DATA_LOSS); + return; + } + + if(m_correctly_reading == 0) + { + // Print a log just once + m_correctly_reading++; + SPDLOG_INFO("The plugin received at least one event from the " + "k8s-metacollector"); + } + + m_enc.set_name(ASYNC_EVENT_NAME); + m_enc.set_data((void*)json_string.c_str(), json_string.size() + 1); + m_enc.encode(m_handler.writer()); + m_handler.push(); + StartRead(&m_event); +} +// Some errors reported by Falco in failure conditions: +// 1. When the server exposes a TLS certificate but the client doesn't: +// ``` +// [2023-11-29 16:42:05.598] [error] [k8smeta] "error during the RPC call. Error +// code (UNAVAILABLE), error message (failed to connect to all addresses; last +// error: UNAVAILABLE: ipv4:127.0.0.1:45000: Socket closed)" +// ``` +// +// 2. When the client exposes a TLS certificate but the server doesn't: +// ``` +// E1129 16:45:02.792586417 17566 ssl_transport_security.cc:1432] Handshake +// failed with fatal error SSL_ERROR_SSL: error:100000f7:SSL +// routines:OPENSSL_internal:WRONG_VERSION_NUMBER. E1129 16:45:02.793554245 +// 17580 ssl_transport_security.cc:1432] Handshake failed with fatal error +// SSL_ERROR_SSL: error:100000f7:SSL +// routines:OPENSSL_internal:WRONG_VERSION_NUMBER. [2023-11-29 16:45:02.793] +// [error] [k8smeta] error during the RPC call. Error code (14), error message +// (failed to connect to all addresses; last error: UNKNOWN: +// ipv4:127.0.0.1:45000: Ssl handshake failed: SSL_ERROR_SSL: error:100000f7:SSL +// routines:OPENSSL_internal:WRONG_VERSION_NUMBER) +// ``` +// +// 3. If the port or the nodename in the plugin init params are wrong +// ``` +// [2023-11-29 17:01:08.802] [error] [k8smeta] error during the RPC call. 
Error +// code (14), error message (failed to connect to all addresses; last error: +// UNKNOWN: ipv4:127.0.0.1:45001: Failed to connect to remote host: Connection +// refused) +// ``` +// +// 4. If the CA root PEM is wrong +// ``` +// [2023-11-29 17:04:17.633] [error] [k8smeta] Cannot open any PEM bundle at +// '/etc/invalid'. Proceed with insecure connection +// ``` +// +// 5. If the k8s-metacollector restart +// ``` +// [2023-11-29 17:07:11.692] [error] [k8smeta] error during the RPC call. Error +// code (14), error message (Socket closed) [2023-11-29 17:07:13.707] [error] +// [k8smeta] error during the RPC call. Error code (14), error message (failed +// to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:45000: +// Failed to connect to remote host: Connection refused) +// ``` +// +void K8sMetaClient::OnDone(const grpc::Status& s) +{ + switch(s.error_code()) + { + case grpc::StatusCode::OK: + SPDLOG_INFO("gRPC call correctly terminated."); + break; + + case grpc::StatusCode::CANCELLED: + // This happens during Falco hot reload or on termination + SPDLOG_INFO("gRPC call cancelled. Full message: ({})", + s.error_message()); + break; + + default: + SPDLOG_ERROR("error during the RPC call. Error code ({}), error " + "message ({})", + int32_t(s.error_code()), s.error_message()); + break; + } + NotifyEnd(s.error_code()); +} + +// Return true if we need to restart the connection, false if we have done. +bool K8sMetaClient::Await(uint64_t& backoff_seconds) +{ + std::unique_lock l(m_mu); + // m_status_code != grpc::StatusCode::DO_NOT_USE means that we have a new + // status and we need to terminate + m_cv.wait(l, + [this] + { + return m_async_thread_quit.load() || + m_status_code != grpc::StatusCode::DO_NOT_USE; + }); + + if(m_async_thread_quit.load()) + { + // We don't a restart if we receive the stop. 
+ return false; + } + + switch(m_status_code) + { + case grpc::StatusCode::OK: + return false; + + case grpc::StatusCode::UNAUTHENTICATED: + case grpc::StatusCode::PERMISSION_DENIED: + case grpc::StatusCode::FAILED_PRECONDITION: + case grpc::StatusCode::UNAVAILABLE: + // In these cases, we want to update the backoff + backoff_seconds = backoff_seconds * 2 >= MAX_BACKOFF_VALUE + ? MAX_BACKOFF_VALUE + : backoff_seconds * 2; + return true; + + default: + // Reset the backoff + backoff_seconds = MIN_BACKOFF_VALUE; + break; + } + // The only case in which we don't restart is when the server correctly + // terminates the gRPC call. + return true; +} diff --git a/plugins/k8smeta/src/grpc_client.h b/plugins/k8smeta/src/grpc_client.h new file mode 100644 index 00000000..a2e57ed7 --- /dev/null +++ b/plugins/k8smeta/src/grpc_client.h @@ -0,0 +1,60 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include "metadata.grpc.pb.h" + +#define MIN_BACKOFF_VALUE 1 // 1 Seconds +#define MAX_BACKOFF_VALUE 120 // 2 Minutes + +class K8sMetaClient : public grpc::ClientReadReactor +{ + public: + K8sMetaClient(const std::string& nodename, const std::string& ip_port, + const std::string& ca_PEM_encoding, std::mutex& mu, + std::condition_variable& cv, std::atomic& thread_quit, + falcosecurity::async_event_handler& handler); + ~K8sMetaClient() { m_context.TryCancel(); } + + bool Await(uint64_t& backoff_seconds); + + private: + void OnReadDone(bool ok) override; + void OnDone(const grpc::Status& s) override; + void NotifyEnd(grpc::StatusCode c); + + std::unique_ptr m_stub; + grpc::ClientContext m_context; + metadata::Event m_event; + falcosecurity::events::asyncevent_e_encoder m_enc; + grpc::StatusCode m_status_code; + + // Shared with the thread that manages the async capability + std::mutex& m_mu; + std::condition_variable& m_cv; + std::atomic& m_async_thread_quit; + falcosecurity::async_event_handler& m_handler; + // Use to print a log message when we can connect at least one time with the + // metacollector. + uint64_t m_correctly_reading; +}; diff --git a/plugins/k8smeta/src/plugin.cpp b/plugins/k8smeta/src/plugin.cpp new file mode 100644 index 00000000..da416791 --- /dev/null +++ b/plugins/k8smeta/src/plugin.cpp @@ -0,0 +1,1289 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include "plugin_only_consts.h" +#include "shared_with_tests_consts.h" +#include "grpc_client.h" +#include "plugin.h" + +#include +#include +#include +#include +#include +#include +#include + +#define ADD_MODIFY_TABLE_ENTRY(_resource_name, _resource_table) \ + if(resource_kind.compare(_resource_name) == 0) \ + { \ + _resource_table[resource_uid] = res_layout; \ + /* In debug mode we just print which resource has been added/updated \ + */ \ + SPDLOG_DEBUG("added/modified {} {}", _resource_name, resource_uid); \ + /* In trace mode we print also the content of the resource */ \ + SPDLOG_TRACE("resource content {}", res_layout.print_resource()); \ + return; \ + } + +#define DELETE_TABLE_ENTRY(_resource_name, _resource_table) \ + if(resource_kind.compare(_resource_name) == 0) \ + { \ + _resource_table.erase(resource_uid); \ + SPDLOG_DEBUG("deleted {} {}", _resource_name, resource_uid); \ + return; \ + } + +// This is the regex needed to extract the pod_uid from the cgroup +static re2::RE2 pattern(RGX_POD, re2::RE2::POSIX); + +////////////////////////// +// General plugin API +////////////////////////// + +falcosecurity::init_schema my_plugin::get_init_schema() +{ + /// todo!: check config names + falcosecurity::init_schema init_schema; + init_schema.schema_type = + falcosecurity::init_schema_type::SS_PLUGIN_SCHEMA_JSON; + init_schema.schema = R"( +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "required": [ + "collectorHostname", + "collectorPort", + "nodename" + ], + "properties": { + "verbosity": { + "enum": [ + "trace", + "debug", + "info", + "warning", + "error", + "critical" + ], + "title": "The plugin logging verbosity", + "description": "The verbosity that the plugin will use when printing logs." + }, + "collectorHostname": { + "type": "string", + "title": "The collector hostname", + "description": "The hostname used by the plugin to contact the collector (e.g. '128.141.201.74')." 
+ }, + "collectorPort": { + "type": "integer", + "title": "The collector port", + "description": "The port used by the plugin to contact the collector (e.g. '45000')." + }, + "nodename": { + "type": "string", + "title": "The node on which Falco is deployed", + "description": "The plugin collects k8s metadata only for the node on which Falco is deployed so the nodename must be specified." + }, + "caPEMBundle": { + "type": "string", + "title": "The path to the PEM encoding of the server root certificates", + "description": "The path to the PEM encoding of the server root certificates. E.g. '/etc/ssl/certs/ca-certificates.crt'" + } + }, + "additionalProperties": false, + "type": "object" +})"; + return init_schema; +} + +void my_plugin::parse_init_config(nlohmann::json& config_json) +{ + // Verbosity, the default verbosity is already set in the 'init' method + if(config_json.contains(nlohmann::json::json_pointer(VERBOSITY_PATH))) + { + // If the user specified a verbosity we override the actual one (`warn`) + std::string verbosity; + config_json.at(nlohmann::json::json_pointer(VERBOSITY_PATH)) + .get_to(verbosity); + spdlog::set_level(spdlog::level::from_str(verbosity)); + } + + // Collector hostname + if(config_json.contains(nlohmann::json::json_pointer(HOSTNAME_PATH))) + { + config_json.at(nlohmann::json::json_pointer(HOSTNAME_PATH)) + .get_to(m_collector_hostname); + } + else + { + // This should never happen since it is required by the json schema + SPDLOG_CRITICAL("There is no collector hostname in the plugin config"); + assert(false); + } + + // Collector port + if(config_json.contains(nlohmann::json::json_pointer(PORT_PATH))) + { + uint64_t collector_port = 0; + config_json.at(nlohmann::json::json_pointer(PORT_PATH)) + .get_to(collector_port); + m_collector_port = std::to_string(collector_port); + } + else + { + // This should never happen since it is required by the json schema + SPDLOG_CRITICAL("There is no collector port in the plugin config"); + 
assert(false); + } + + // Nodename + if(config_json.contains(nlohmann::json::json_pointer(NODENAME_PATH))) + { + std::string nodename_string = ""; + config_json.at(nlohmann::json::json_pointer(NODENAME_PATH)) + .get_to(nodename_string); + + // todo!: remove it when we solved in Falco + // This is just a simple workaround until we solve the Falco issue + // If the provided string is an env variable we use the content + // of the env variable + std::string env_var = ""; + re2::RE2 env_var_pattern("(\\${[^}]+})", re2::RE2::POSIX); + if(re2::RE2::PartialMatch(nodename_string, env_var_pattern, &env_var)) + { + // - remove `${` at the beginning, so the start index is 2 + // - the total length is the length of the string -3 (`${}`) + auto env_var_name = env_var.substr(2, env_var.length() - 3); + if(getenv(env_var_name.c_str())) + { + m_nodename = getenv(env_var_name.c_str()); + } + else + { + SPDLOG_CRITICAL("The provided env variable '{}' is empty", + env_var); + m_nodename = ""; + } + } + else + { + m_nodename = nodename_string; + } + SPDLOG_INFO("metadata are received from nodename '{}'", m_nodename); + } + else + { + // This should never happen since it is required by the json schema + SPDLOG_CRITICAL("There is no node name in the plugin config"); + assert(false); + } + + // CA PEM path + + // Default case: insecure connection + m_ca_PEM_encoding = ""; + if(config_json.contains(nlohmann::json::json_pointer(CA_CERT_PATH))) + { + std::string ca_PEM_encoding_path; + config_json.at(nlohmann::json::json_pointer(CA_CERT_PATH)) + .get_to(ca_PEM_encoding_path); + if(!ca_PEM_encoding_path.empty()) + { + std::ifstream input_file(ca_PEM_encoding_path); + if(!input_file.is_open()) + { + SPDLOG_ERROR("Cannot open any PEM bundle at '{}'. 
Proceed with " + "insecure connection", + ca_PEM_encoding_path); + } + else + { + std::stringstream buffer; + buffer << input_file.rdbuf(); + m_ca_PEM_encoding = buffer.str(); + } + } + } +} + +bool my_plugin::init(falcosecurity::init_input& in) +{ + using st = falcosecurity::state_value_type; + auto& t = in.tables(); + + // The default logger is already multithread. + // The initial verbosity is `warn`, after parsing the plugin config, this + // value could change + spdlog::set_level(spdlog::level::info); + + // Alternatives logs: + // spdlog::set_pattern("%a %b %d %X %Y: [%l] [k8smeta] %v"); + // + // We use local time like in Falco, not UTC + spdlog::set_pattern("%c: [%l] [k8smeta] %v"); + + SPDLOG_DEBUG("init the plugin"); + + // This should never happen, the config is validated by the framework + if(in.get_config().empty()) + { + m_lasterr = "cannot find the init config for the plugin"; + SPDLOG_CRITICAL(m_lasterr); + return false; + } + + auto cfg = nlohmann::json::parse(in.get_config()); + parse_init_config(cfg); + + try + { + m_thread_table = t.get_table(THREAD_TABLE_NAME, st::SS_PLUGIN_ST_INT64); + // Add the pod_uid field into thread table + m_pod_uid_field = m_thread_table.add_field( + t.fields(), POD_UID_FIELD_NAME, st::SS_PLUGIN_ST_STRING); + } + catch(falcosecurity::plugin_exception e) + { + m_lasterr = "cannot add the '" + std::string(POD_UID_FIELD_NAME) + + "' field into the '" + std::string(THREAD_TABLE_NAME) + + "' table: " + e.what(); + SPDLOG_CRITICAL(m_lasterr); + return false; + } + return true; +} + +////////////////////////// +// Async capability +////////////////////////// + +// We need this API to start the async thread when the +// `set_async_event_handler` plugin API will be called. 
+bool my_plugin::start_async_events( + std::shared_ptr f) +{ + m_async_thread_quit = false; + m_async_thread = std::thread(&my_plugin::async_thread_loop, this, + std::move(f->new_handler())); + return true; +} + +// We need this API to stop the async thread when the +// `set_async_event_handler` plugin API will be called. +bool my_plugin::stop_async_events() noexcept +{ + { + std::unique_lock l(m_mu); + m_async_thread_quit = true; + m_cv.notify_one(); + // Release the lock + } + + if(m_async_thread.joinable()) + { + m_async_thread.join(); + SPDLOG_DEBUG("joined the async thread"); + } + return true; +} + +// This is not a needed API is just a custom method we want to use +// internally. +void my_plugin::async_thread_loop( + std::unique_ptr h) noexcept +{ + std::string ip_port = m_collector_hostname + ":" + m_collector_port; + uint64_t backoff_seconds = MIN_BACKOFF_VALUE; + + while(!m_async_thread_quit.load()) + { + K8sMetaClient k8sclient(m_nodename, ip_port, m_ca_PEM_encoding, m_mu, + m_cv, m_async_thread_quit, *h.get()); + + if(!k8sclient.Await(backoff_seconds)) + { + break; + } + + std::unique_lock l(m_mu); + m_cv.wait_for(l, std::chrono::seconds(backoff_seconds), + [this] { return m_async_thread_quit.load(); }); + } + + SPDLOG_INFO("Async thread terminated"); +} + +////////////////////////// +// Extract capability +////////////////////////// + +std::vector my_plugin::get_fields() +{ + using ft = falcosecurity::field_value_type; + // Use an array to perform a static_assert one the size. + const falcosecurity::field_info fields[] = { + {ft::FTYPE_STRING, "k8sres.pod.name", "Pod Name", + "Kubernetes pod name."}, + {ft::FTYPE_STRING, "k8sres.pod.id", "Pod ID", "Kubernetes pod ID."}, + {ft::FTYPE_STRING, + "k8sres.pod.label", + "Pod Label", + "Kubernetes pod label. E.g. 'k8sres.pod.label[foo]'.", + {.key = true, .required = true}}, + {ft::FTYPE_STRING, "k8sres.pod.labels", "Pod Labels", + "Kubernetes pod comma-separated key/value labels. E.g. 
" + "'(foo1:bar1,foo2:bar2)'.", + falcosecurity::field_arg(), true}, + {ft::FTYPE_STRING, "k8sres.pod.ip", "Pod Ip", "Kubernetes pod ip"}, + + {ft::FTYPE_STRING, "k8sres.ns.name", "Namespace Name", + "Kubernetes namespace name."}, + {ft::FTYPE_STRING, "k8sres.ns.id", "Namespace ID", + "Kubernetes namespace ID."}, + {ft::FTYPE_STRING, + "k8sres.ns.label", + "Namespace Label", + "Kubernetes namespace label. E.g. 'k8sres.ns.label[foo]'.", + {.key = true, .index = false, .required = true}}, + {ft::FTYPE_STRING, "k8sres.ns.labels", "Namespace Labels", + "Kubernetes namespace comma-separated key/value labels. E.g. " + "'(foo1:bar1,foo2:bar2)'.", + falcosecurity::field_arg(), true}, + + {ft::FTYPE_STRING, "k8sres.deployment.name", "Deployment Name", + "Kubernetes deployment name."}, + {ft::FTYPE_STRING, "k8sres.deployment.id", "Deployment ID", + "Kubernetes deployment ID."}, + {ft::FTYPE_STRING, + "k8sres.deployment.label", + "Deployment Label", + "Kubernetes deployment label. E.g. 'k8sres.rs.label[foo]'.", + {.key = true, .required = true}}, + {ft::FTYPE_STRING, "k8sres.deployment.labels", "Deployment Labels", + "Kubernetes deployment comma-separated key/value labels. E.g. " + "'(foo1:bar1,foo2:bar2)'.", + falcosecurity::field_arg(), true}, + + {ft::FTYPE_STRING, "k8sres.svc.name", "Services Name", + "Kubernetes services name. Return a list with all the names of " + "the services associated with the " + "current pod. E.g. '(service1,service2)'", + falcosecurity::field_arg(), true}, + {ft::FTYPE_STRING, "k8sres.svc.id", "Services ID", + "Kubernetes services ID. Return a list with all the IDs of the " + "services associated with the " + "current pod. E.g. " + "'(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-" + "25307c5007e8)'", + falcosecurity::field_arg(), true}, + {ft::FTYPE_STRING, + "k8sres.svc.label", + "Services Label", + "Kubernetes services label. 
If the services associated with the " + "current pod have a label with this " + "name, return the list of label's values. E.g. if the current pod " + "has 2 services associated and both " + "have the 'foo' label, 'k8sres.svc.label[foo]' will return " + "'(service1-label-value,service2-label-value)", + {.key = true, .required = true}, + true}, + {ft::FTYPE_STRING, "k8sres.svc.labels", "Services Labels", + "Kubernetes services labels. Return a list with all the " + "comma-separated key/value labels of the " + "services associated with the current pod. E.g. " + "'(foo1:bar1,foo2:bar2)'", + falcosecurity::field_arg(), true}, + + {ft::FTYPE_STRING, "k8sres.rs.name", "Replica Set Name", + "Kubernetes replica set name."}, + {ft::FTYPE_STRING, "k8sres.rs.id", "Replica Set ID", + "Kubernetes replica set ID."}, + {ft::FTYPE_STRING, + "k8sres.rs.label", + "Replica Set Label", + "Kubernetes replica set label. E.g. 'k8sres.rs.label[foo]'.", + {.key = true, .required = true}}, + {ft::FTYPE_STRING, "k8sres.rs.labels", "Replica Set Labels", + "Kubernetes replica set comma-separated key/value labels. E.g. " + "'(foo1:bar1,foo2:bar2)'.", + falcosecurity::field_arg(), true}, + + {ft::FTYPE_STRING, "k8sres.rc.name", "Replication Controller Name", + "Kubernetes replication controller name."}, + {ft::FTYPE_STRING, "k8sres.rc.id", "Replication Controller ID", + "Kubernetes replication controller ID."}, + {ft::FTYPE_STRING, + "k8sres.rc.label", + "Replication Controller Label", + "Kubernetes replication controller label. E.g. " + "'k8sres.rc.label[foo]'.", + {.key = true, .required = true}}, + {ft::FTYPE_STRING, "k8sres.rc.labels", + "Replication Controller Labels", + "Kubernetes replication controller comma-separated key/value " + "labels. E.g. 
'(foo1:bar1,foo2:bar2)'.", + falcosecurity::field_arg(), true}, + }; + const int fields_size = sizeof(fields) / sizeof(fields[0]); + static_assert(fields_size == K8S_FIELD_MAX, "Wrong number of k8s fields."); + return std::vector(fields, fields + fields_size); +} + +bool inline my_plugin::get_uid_array(nlohmann::json& pod_refs_json, + enum K8sResource resource, + std::vector& uid_array) +{ + std::string json_path = ""; + switch(resource) + { + case NS: + json_path = "/resources/Namespace/list"; + break; + + case DEPLOYMENT: + json_path = "/resources/Deployment/list"; + break; + + case SVC: + json_path = "/resources/Service/list"; + break; + + case RS: + json_path = "/resources/ReplicaSet/list"; + break; + + case RC: + json_path = "/resources/ReplicationController/list"; + break; + + default: + return false; + } + if(!pod_refs_json.contains(nlohmann::json::json_pointer(json_path))) + { + return false; + } + pod_refs_json.at(nlohmann::json::json_pointer(json_path)).get_to(uid_array); + + // If the `contains()` is successful we should always have at least one + // element, this is an extra check. 
+ if(uid_array.empty()) + { + return false; + } + + return true; +} + +bool inline my_plugin::get_layout(nlohmann::json& pod_refs_json, + enum K8sResource resource, + resource_layout& layout) +{ + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + std::unordered_map table; + switch(resource) + { + case NS: + table = m_namespace_table; + break; + + case DEPLOYMENT: + table = m_deployment_table; + break; + + case SVC: + table = m_service_table; + break; + + case RS: + table = m_replicaset_table; + break; + + case RC: + table = m_replication_controller_table; + break; + + default: + return false; + } + + auto it = table.find(uid_array[0]); + if(it == table.end()) + { + return false; + } + layout = it->second; + return true; +} + +bool inline my_plugin::extract_name_from_meta( + nlohmann::json& meta_json, falcosecurity::extract_request& req) +{ + std::string resource_name; + // todo! Possible optimization here and in some other places, some paths + // should always be there. + if(!meta_json.contains(nlohmann::json::json_pointer(NAME_PATH))) + { + SPDLOG_ERROR("The resource meta doesn't contain the '{}' field. " + "Resource meta:\n{}\n", + NAME_PATH, meta_json.dump()); + return false; + } + meta_json.at(nlohmann::json::json_pointer(NAME_PATH)).get_to(resource_name); + req.set_value(resource_name, true); + return true; +} + +bool inline my_plugin::extract_label_value_from_meta( + nlohmann::json& meta_json, falcosecurity::extract_request& req) +{ + if(!req.is_arg_present()) + { + return false; + } + + // We cannot concatenate "/labels/" to extract the label value + // because `` can contain `/` (`app.kubernetes.io/component`), so + // no json paths! We fetch the whole map and then we iterate over it. + std::unordered_map labels_map; + if(!meta_json.contains(nlohmann::json::json_pointer(LABELS_PATH))) + { + // Please note that this is not an error, is possible that + // some resources don't have the `/labels` key. 
+ return false; + } + meta_json.at(nlohmann::json::json_pointer(LABELS_PATH)).get_to(labels_map); + + auto it = labels_map.find(req.get_arg_key()); + if(it == labels_map.end()) + { + return false; + } + req.set_value(it->second, true); + return true; +} + +bool inline my_plugin::extract_labels_from_meta( + nlohmann::json& meta_json, falcosecurity::extract_request& req) +{ + std::unordered_map labels_map; + if(!meta_json.contains(nlohmann::json::json_pointer(LABELS_PATH))) + { + // Please note that this is not an error, is possible that + // some resources don't have the `/labels` key. + return false; + } + meta_json.at(nlohmann::json::json_pointer(LABELS_PATH)).get_to(labels_map); + + std::vector labels; + for(const auto label : labels_map) + { + labels.emplace_back(label.first + ":" + label.second); + } + + if(labels.empty()) + { + return false; + } + + req.set_value(labels.begin(), labels.end(), true); + return true; +} + +bool inline my_plugin::extract_uid_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + // We have at least one element otherwise the previous check should return + // 0. 
+ req.set_value(uid_array[0], true); + return true; +} + +bool inline my_plugin::extract_name_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + resource_layout rs_layout; + if(!get_layout(pod_refs_json, resource, rs_layout)) + { + return false; + } + return extract_name_from_meta(rs_layout.meta, req); +} + +bool inline my_plugin::extract_label_value_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + resource_layout rs_layout; + if(!get_layout(pod_refs_json, resource, rs_layout)) + { + return false; + } + return extract_label_value_from_meta(rs_layout.meta, req); +} + +bool inline my_plugin::extract_labels_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + resource_layout rs_layout; + if(!get_layout(pod_refs_json, resource, rs_layout)) + { + return false; + } + + return extract_labels_from_meta(rs_layout.meta, req); +} + +bool inline my_plugin::extract_uid_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + req.set_value(uid_array.begin(), uid_array.end(), true); + return true; +} + +bool inline my_plugin::extract_name_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + std::unordered_map table; + switch(resource) + { + case NS: + table = m_namespace_table; + break; + + case DEPLOYMENT: + table = m_deployment_table; + break; + + case SVC: + table = m_service_table; + break; + + case RS: + table = m_replicaset_table; + break; + + case RC: + table = m_replication_controller_table; + break; + + default: + return false; + } + + 
std::vector name_array; + std::string name; + for(const auto& uid : uid_array) + { + auto it = table.find(uid); + if(it == table.end()) + { + continue; + } + + if(!it->second.meta.contains(nlohmann::json::json_pointer(NAME_PATH))) + { + SPDLOG_ERROR("The resource meta doesn't contain the '{}' field. " + "Resource meta:\n{}\n", + NAME_PATH, it->second.meta.dump()); + return false; + } + it->second.meta.at(nlohmann::json::json_pointer(NAME_PATH)) + .get_to(name); + name_array.push_back(name); + } + + if(name_array.empty()) + { + return false; + } + + req.set_value(name_array.begin(), name_array.end(), true); + return true; +} + +bool inline my_plugin::extract_label_value_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + if(!req.is_arg_present()) + { + return false; + } + + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + std::unordered_map table; + switch(resource) + { + case NS: + table = m_namespace_table; + break; + + case DEPLOYMENT: + table = m_deployment_table; + break; + + case SVC: + table = m_service_table; + break; + + case RS: + table = m_replicaset_table; + break; + + case RC: + table = m_replication_controller_table; + break; + + default: + return false; + } + + std::vector label_value_array; + std::string label_value; + std::unordered_map labels_map; + for(const auto& uid : uid_array) + { + auto layout_it = table.find(uid); + if(layout_it == table.end()) + { + continue; + } + + // If the resource doesn't have a `/labels` field skip it. 
+ if(!layout_it->second.meta.contains( + nlohmann::json::json_pointer(LABELS_PATH))) + { + continue; + } + + layout_it->second.meta.at(nlohmann::json::json_pointer(LABELS_PATH)) + .get_to(labels_map); + auto it = labels_map.find(req.get_arg_key()); + if(it == labels_map.end()) + { + continue; + } + label_value_array.push_back(it->second); + } + + if(label_value_array.empty()) + { + return false; + } + + req.set_value(label_value_array.begin(), label_value_array.end(), true); + return true; +} + +bool inline my_plugin::extract_labels_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req) +{ + std::vector uid_array; + if(!get_uid_array(pod_refs_json, resource, uid_array)) + { + return false; + } + + std::unordered_map table; + switch(resource) + { + case NS: + table = m_namespace_table; + break; + + case DEPLOYMENT: + table = m_deployment_table; + break; + + case SVC: + table = m_service_table; + break; + + case RS: + table = m_replicaset_table; + break; + + case RC: + table = m_replication_controller_table; + break; + + default: + return false; + } + + std::vector labels_array; + std::unordered_map labels_map; + for(const auto& uid : uid_array) + { + auto layout_it = table.find(uid); + if(layout_it == table.end()) + { + continue; + } + + // If the resource doesn't have a `/labels` field skip it. 
+ if(!layout_it->second.meta.contains( + nlohmann::json::json_pointer(LABELS_PATH))) + { + continue; + } + + layout_it->second.meta.at(nlohmann::json::json_pointer(LABELS_PATH)) + .get_to(labels_map); + for(const auto label : labels_map) + { + labels_array.emplace_back(label.first + ":" + label.second); + } + } + + if(labels_array.empty()) + { + return false; + } + + req.set_value(labels_array.begin(), labels_array.end(), true); + return true; +} + +bool my_plugin::extract(const falcosecurity::extract_fields_input& in) +{ + auto& req = in.get_extract_request(); + auto& tr = in.get_table_reader(); + + int64_t thread_id = in.get_event_reader().get_tid(); + + if(thread_id <= 0) + { + SPDLOG_INFO("unknown thread id for event num '{}' with type '{}'", + in.get_event_reader().get_num(), + int32_t(in.get_event_reader().get_type())); + return false; + } + + falcosecurity::table_entry thread_entry; + std::string pod_uid = ""; + try + { + // retrieve the thread entry associated with this thread id + thread_entry = m_thread_table.get_entry(tr, thread_id); + // retrieve pod_uid from the entry + m_pod_uid_field.read_value(tr, thread_entry, pod_uid); + } + catch(falcosecurity::plugin_exception e) + { + SPDLOG_ERROR("cannot extract the pod uid for the thread id '{}': {}", + thread_id, e.what()); + return false; + } + + // The process is not into a pod, stop here. 
+ if(pod_uid.empty()) + { + return false; + } + + // Try to find the entry associated with the pod_uid + auto it = m_pod_table.find(pod_uid); + if(it == m_pod_table.end()) + { + SPDLOG_DEBUG("the plugin has no info for the pod uid '{}'", pod_uid); + return false; + } + + auto pod_layout = it->second; + switch(req.get_field_id()) + { + case K8S_POD_NAME: + return extract_name_from_meta(pod_layout.meta, req); + case K8S_POD_ID: + req.set_value(pod_uid, true); + break; + case K8S_POD_LABEL: + return extract_label_value_from_meta(pod_layout.meta, req); + case K8S_POD_LABELS: + return extract_labels_from_meta(pod_layout.meta, req); + case K8S_POD_IP: + { + if(!pod_layout.status.contains( + nlohmann::json::json_pointer(POD_IP_PATH))) + { + SPDLOG_ERROR("The pod status doesn't contain the '{}' field. " + "Resource status:\n{}\n", + POD_IP_PATH, pod_layout.status.dump()); + return false; + } + std::string pod_ip; + pod_layout.status.at(nlohmann::json::json_pointer(POD_IP_PATH)) + .get_to(pod_ip); + req.set_value(pod_ip, true); + break; + } + case K8S_NS_NAME: + { + if(!pod_layout.meta.contains( + nlohmann::json::json_pointer(NAMESPACE_PATH))) + { + SPDLOG_ERROR("The pod meta doesn't contain the '{}' field. " + "Resource meta:\n{}\n", + NAMESPACE_PATH, pod_layout.meta.dump()); + return false; + } + std::string pod_namespace_name = ""; + pod_layout.meta.at(nlohmann::json::json_pointer(NAMESPACE_PATH)) + .get_to(pod_namespace_name); + req.set_value(pod_namespace_name, true); + break; + } + case K8S_NS_ID: + return extract_uid_from_refs(pod_layout.refs, NS, req); + case K8S_NS_LABEL: + return extract_label_value_from_refs(pod_layout.refs, NS, req); + case K8S_NS_LABELS: + return extract_labels_from_refs(pod_layout.refs, NS, req); + // We cannot extract deployment fields directly from the pod name + // because it's possible to move some pods from one deployment to + // another under some circumstances. 
+ case K8S_DEPLOYMENT_NAME: + return extract_name_from_refs(pod_layout.refs, DEPLOYMENT, req); + case K8S_DEPLOYMENT_ID: + return extract_uid_from_refs(pod_layout.refs, DEPLOYMENT, req); + case K8S_DEPLOYMENT_LABEL: + return extract_label_value_from_refs(pod_layout.refs, DEPLOYMENT, req); + case K8S_DEPLOYMENT_LABELS: + return extract_labels_from_refs(pod_layout.refs, DEPLOYMENT, req); + case K8S_SVC_NAME: + return extract_name_array_from_refs(pod_layout.refs, SVC, req); + case K8S_SVC_ID: + return extract_uid_array_from_refs(pod_layout.refs, SVC, req); + case K8S_SVC_LABEL: + return extract_label_value_array_from_refs(pod_layout.refs, SVC, req); + case K8S_SVC_LABELS: + return extract_labels_array_from_refs(pod_layout.refs, SVC, req); + // We cannot extract replicaSet fields directly from the pod name + // because it's possible to move some pods from one replicaSet to + // another under some circumstances. + case K8S_RS_NAME: + return extract_name_from_refs(pod_layout.refs, RS, req); + case K8S_RS_ID: + return extract_uid_from_refs(pod_layout.refs, RS, req); + case K8S_RS_LABEL: + return extract_label_value_from_refs(pod_layout.refs, RS, req); + case K8S_RS_LABELS: + return extract_labels_from_refs(pod_layout.refs, RS, req); + // We cannot extract replicationController fields directly from the pod + // name because it's possible to move some pods from one + // replicationController to another under some circumstances. 
+ case K8S_RC_NAME: + return extract_name_from_refs(pod_layout.refs, RC, req); + case K8S_RC_ID: + return extract_uid_from_refs(pod_layout.refs, RC, req); + case K8S_RC_LABEL: + return extract_label_value_from_refs(pod_layout.refs, RC, req); + case K8S_RC_LABELS: + return extract_labels_from_refs(pod_layout.refs, RC, req); + + default: + SPDLOG_ERROR( + "unknown extraction request on field '{}' for pod_uid '{}'", + req.get_field_id(), pod_uid); + return false; + } + + return true; +} + +////////////////////////// +// Parse capability +////////////////////////// + +void inline my_plugin::parse_added_modified_resource(nlohmann::json& json_event, + std::string& resource_uid, + std::string& resource_kind) +{ + // We craft the resource layout + resource_layout res_layout = { + .uid = resource_uid, + .kind = resource_kind, + }; + + if(json_event.contains(nlohmann::json::json_pointer(META_PATH))) + { + std::string meta_string; + json_event.at(nlohmann::json::json_pointer(META_PATH)) + .get_to(meta_string); + res_layout.meta = nlohmann::json::parse(meta_string); + } + + if(json_event.contains(nlohmann::json::json_pointer(SPEC_PATH))) + { + std::string spec_string; + json_event.at(nlohmann::json::json_pointer(SPEC_PATH)) + .get_to(spec_string); + res_layout.spec = nlohmann::json::parse(spec_string); + } + + if(json_event.contains(nlohmann::json::json_pointer(STATUS_PATH))) + { + std::string status_string; + json_event.at(nlohmann::json::json_pointer(STATUS_PATH)) + .get_to(status_string); + res_layout.status = nlohmann::json::parse(status_string); + } + + if(json_event.contains(nlohmann::json::json_pointer(REFS_PATH))) + { + nlohmann::json refs_json; + json_event.at(nlohmann::json::json_pointer(REFS_PATH)) + .get_to(refs_json); + res_layout.refs = refs_json; + } + + ADD_MODIFY_TABLE_ENTRY("Pod", m_pod_table) + ADD_MODIFY_TABLE_ENTRY("Namespace", m_namespace_table) + ADD_MODIFY_TABLE_ENTRY("Deployment", m_deployment_table) + ADD_MODIFY_TABLE_ENTRY("Service", m_service_table) 
+ ADD_MODIFY_TABLE_ENTRY("ReplicaSet", m_replicaset_table) + ADD_MODIFY_TABLE_ENTRY("ReplicationController", + m_replication_controller_table) + ADD_MODIFY_TABLE_ENTRY("DeamonSet", m_deamonset_table) +} + +void inline my_plugin::parse_deleted_resource(nlohmann::json& json_event, + std::string& resource_uid, + std::string& resource_kind) +{ + DELETE_TABLE_ENTRY("Pod", m_pod_table) + DELETE_TABLE_ENTRY("Namespace", m_namespace_table) + DELETE_TABLE_ENTRY("Deployment", m_deployment_table) + DELETE_TABLE_ENTRY("Service", m_service_table) + DELETE_TABLE_ENTRY("ReplicaSet", m_replicaset_table) + DELETE_TABLE_ENTRY("ReplicationController", m_replication_controller_table) + DELETE_TABLE_ENTRY("DeamonSet", m_deamonset_table) +} + +bool inline my_plugin::parse_async_event( + const falcosecurity::parse_event_input& in) +{ + auto& evt = in.get_event_reader(); + falcosecurity::events::asyncevent_e_decoder ad(evt); + if(std::strcmp(ad.get_name(), ASYNC_EVENT_NAME) != 0) + { + // We are not interested in parsing async events that are not + // generated by our plugin. + // This is not an error, it could happen when we have more than one + // async plugin loaded. + SPDLOG_DEBUG("received an sync event with name {}", ad.get_name()); + return true; + } + + uint32_t json_charbuf_len = 0; + char* json_charbuf_pointer = (char*)ad.get_data(json_charbuf_len); + if(json_charbuf_pointer == nullptr) + { + m_lasterr = "there is no payload in the async event"; + SPDLOG_ERROR(m_lasterr); + return false; + } + auto json_event = nlohmann::json::parse(std::string(json_charbuf_pointer)); + + std::string event_reason; + std::string resource_uid; + std::string resource_kind; + + if(!json_event.contains(nlohmann::json::json_pointer(REASON_PATH)) || + !json_event.contains(nlohmann::json::json_pointer(UID_PATH)) || + !json_event.contains(nlohmann::json::json_pointer(KIND_PATH))) + { + SPDLOG_ERROR("Invalid json resource.'{}', '{}', and '{}' should always " + "be present. 
Resource json:\n{}\n", + REASON_PATH, UID_PATH, KIND_PATH, json_event.dump()); + return false; + } + + json_event.at(nlohmann::json::json_pointer(REASON_PATH)) + .get_to(event_reason); + json_event.at(nlohmann::json::json_pointer(UID_PATH)).get_to(resource_uid); + json_event.at(nlohmann::json::json_pointer(KIND_PATH)) + .get_to(resource_kind); + + if(event_reason.compare(REASON_CREATE) == 0) + { + SPDLOG_DEBUG("try to add {} '{}'", resource_kind, resource_uid); + parse_added_modified_resource(json_event, resource_uid, resource_kind); + } + else if(event_reason.compare(REASON_UPDATE) == 0) + { + SPDLOG_DEBUG("try to update {} '{}'", resource_kind, resource_uid); + parse_added_modified_resource(json_event, resource_uid, resource_kind); + } + else if(event_reason.compare(REASON_DELETE) == 0) + { + SPDLOG_DEBUG("try to delete {} '{}'", resource_kind, resource_uid); + parse_deleted_resource(json_event, resource_uid, resource_kind); + } + else + { + SPDLOG_ERROR("reason '{}' is not known to the plugin", event_reason); + return false; + } + return true; +} + +// Obtain a param from a sinsp event +static inline sinsp_param get_syscall_evt_param(void* evt, uint32_t num_param) +{ + uint32_t dataoffset = 0; + // pointer to the lengths array inside the event. + auto len = (uint16_t*)((uint8_t*)evt + + sizeof(falcosecurity::_internal::ss_plugin_event)); + for(uint32_t j = 0; j < num_param; j++) + { + // sum lengths of the previous params. 
+ dataoffset += len[j]; + } + return {.param_len = len[num_param], + .param_pointer = + ((uint8_t*)&len + [((falcosecurity::_internal::ss_plugin_event*)evt) + ->nparams]) + + dataoffset}; +} + +bool inline my_plugin::extract_pod_uid( + const falcosecurity::parse_event_input& in) +{ + auto res_param = get_syscall_evt_param(in.get_event_reader().get_buf(), + EXECVE_CLONE_RES_PARAM_IDX); + + // - For execve/execveat we exclude failed syscall events + // - For clone/fork/clone3 we exclude failed syscall events (ret<0) and + // caller events (ret>0). + // When the new thread is in a container in libsinsp we only parse the + // child exit event, so we can do the same thing here. In the child the + // return value is `0`. + if(*((uint64_t*)(res_param.param_pointer)) != 0) + { + return false; + } + + /// todo! Possible optimization, we can set the pod_uid only if we are in a + /// container + // but we need to access the `m_flags` field to understand if we are in a + // container or not. It's also true that if we enable this plugin we are in + // a k8s environment so we need to evaluate this. + + // Extract cgroup param + auto cgroup_param = get_syscall_evt_param(in.get_event_reader().get_buf(), + EXECVE_CLONE_CGROUP_PARAM_IDX); + + // If croups are empty we don't parse the event + if(cgroup_param.param_len == 0) + { + return false; + } + + // Our cgroup an array of charbufs `\0`-termiated. The first charbuf could + // be something like this: + // cpuset=/kubelet.slice/kubelet-kubepods.slice/kubelet-kubepods-pod05869489-8c7f-45dc-9abd-1b1620787bb1.slice/cri-containerd-2f92446a3fbfd0b7a73457b45e96c75a25c5e44e7b1bcec165712b906551c261.scope\0 + // So we can put it in a string and apply our regex. + std::string cgroup_first_charbuf = (char*)cgroup_param.param_pointer; + + // We set the pod uid to `""` if we are not able to extract it. 
+ std::string pod_uid = ""; + + if(re2::RE2::PartialMatch(cgroup_first_charbuf, pattern, &pod_uid)) + { + // Here `pod_uid` could have 2 possible layouts: + // - (driver cgroup) pod05869489-8c7f-45dc-9abd-1b1620787bb1 + // - (driver systemd) pod05869489_8c7f_45dc_9abd_1b1620787bb1 + + // As a first thing we remove the "pod" prefix from `pod_uid` + pod_uid.erase(0, 3); + + // Then we convert `_` into `-` if we are in `systemd` notation. + // The final `pod_uid` layout will be: + // 05869489-8c7f-45dc-9abd-1b1620787bb1 + std::replace(pod_uid.begin(), pod_uid.end(), '_', '-'); + } + + // retrieve thread entry associated with the event tid + auto& tr = in.get_table_reader(); + auto thread_entry = m_thread_table.get_entry( + tr, (int64_t)in.get_event_reader().get_tid()); + + // Write the pod_uid into the entry + auto& tw = in.get_table_writer(); + m_pod_uid_field.write_value(tw, thread_entry, (const char*)pod_uid.c_str()); + return true; +} + +bool my_plugin::parse_event(const falcosecurity::parse_event_input& in) +{ + // NOTE: today in the libs framework, parsing errors are not logged + auto& evt = in.get_event_reader(); + + switch(evt.get_type()) + { + case PPME_ASYNCEVENT_E: + return parse_async_event(in); + case PPME_SYSCALL_EXECVE_19_X: + case PPME_SYSCALL_EXECVEAT_X: + case PPME_SYSCALL_CLONE_20_X: + case PPME_SYSCALL_FORK_20_X: + case PPME_SYSCALL_VFORK_20_X: + case PPME_SYSCALL_CLONE3_X: + return extract_pod_uid(in); + default: + SPDLOG_ERROR("received an unknown event type {}", + int32_t(evt.get_type())); + return false; + } +} diff --git a/plugins/k8smeta/src/plugin.h b/plugins/k8smeta/src/plugin.h new file mode 100644 index 00000000..a998910c --- /dev/null +++ b/plugins/k8smeta/src/plugin.h @@ -0,0 +1,276 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include "plugin_only_consts.h" +#include "shared_with_tests_consts.h" +#include "grpc_client.h" + +#include +#include +#include +#include +#include + +struct resource_layout +{ + std::string uid; + std::string kind; + nlohmann::json meta; + nlohmann::json spec; + nlohmann::json status; + nlohmann::json refs; + + std::string print_resource() const + { + std::ostringstream oss; + oss << "Uid: " << uid << std::endl; + oss << "Kind: " << kind << std::endl; + oss << "Meta: " << meta << std::endl; + oss << "Spec: " << spec << std::endl; + oss << "Status: " << status << std::endl; + oss << "Refs: " << refs << std::endl; + return oss.str(); + } +}; + +struct sinsp_param +{ + uint16_t param_len; + uint8_t* param_pointer; +}; + +class my_plugin +{ + public: + // Keep this aligned with `get_fields` + enum K8sFields + { + K8S_POD_NAME, + K8S_POD_ID, + K8S_POD_LABEL, + K8S_POD_LABELS, + K8S_POD_IP, + K8S_NS_NAME, + K8S_NS_ID, + K8S_NS_LABEL, + K8S_NS_LABELS, + K8S_DEPLOYMENT_NAME, + K8S_DEPLOYMENT_ID, + K8S_DEPLOYMENT_LABEL, + K8S_DEPLOYMENT_LABELS, + K8S_SVC_NAME, + K8S_SVC_ID, + K8S_SVC_LABEL, + K8S_SVC_LABELS, + K8S_RS_NAME, + K8S_RS_ID, + K8S_RS_LABEL, + K8S_RS_LABELS, + K8S_RC_NAME, + K8S_RC_ID, + K8S_RC_LABEL, + K8S_RC_LABELS, + K8S_FIELD_MAX + }; + + enum K8sResource + { + POD, + NS, + DEPLOYMENT, + SVC, + RS, + RC, + }; + + ////////////////////////// + // General plugin API + ////////////////////////// + + virtual ~my_plugin() = default; + + std::string get_name() { return PLUGIN_NAME; } + + std::string get_version() { return PLUGIN_VERSION; } + + std::string 
get_description() { return PLUGIN_DESCRIPTION; } + + std::string get_contact() { return PLUGIN_CONTACT; } + + std::string get_required_api_version() + { + return PLUGIN_REQUIRED_API_VERSION; + } + + std::string get_last_error() { return m_lasterr; } + + void destroy() { SPDLOG_DEBUG("detach the plugin"); } + + falcosecurity::init_schema get_init_schema(); + + void parse_init_config(nlohmann::json& config_json); + + bool init(falcosecurity::init_input& in); + + ////////////////////////// + // Async capability + ////////////////////////// + + std::vector get_async_events() { return ASYNC_EVENT_NAMES; } + + std::vector get_async_event_sources() + { + return ASYNC_EVENT_SOURCES; + } + + bool start_async_events( + std::shared_ptr f); + + bool stop_async_events() noexcept; + + void async_thread_loop( + std::unique_ptr h) noexcept; + + ////////////////////////// + // Extract capability + ////////////////////////// + + std::vector get_extract_event_sources() + { + return EXTRACT_EVENT_SOURCES; + } + + std::vector get_fields(); + + bool inline get_uid_array(nlohmann::json& pod_refs_json, + enum K8sResource resource, + std::vector& uid_array); + + bool inline get_layout(nlohmann::json& pod_refs_json, + enum K8sResource resource, resource_layout& layout); + + bool inline extract_name_from_meta(nlohmann::json& meta_json, + falcosecurity::extract_request& req); + + bool inline extract_label_value_from_meta( + nlohmann::json& meta_json, falcosecurity::extract_request& req); + + bool inline extract_labels_from_meta(nlohmann::json& meta_json, + falcosecurity::extract_request& req); + + bool inline extract_uid_from_refs(nlohmann::json& pod_refs_json, + enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_name_from_refs(nlohmann::json& pod_refs_json, + enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_label_value_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + 
falcosecurity::extract_request& req); + + bool inline extract_labels_from_refs(nlohmann::json& pod_refs_json, + enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_uid_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_name_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_label_value_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req); + + bool inline extract_labels_array_from_refs( + nlohmann::json& pod_refs_json, enum K8sResource resource, + falcosecurity::extract_request& req); + + bool extract(const falcosecurity::extract_fields_input& in); + + ////////////////////////// + // Parse capability + ////////////////////////// + + // We need to parse only the async events produced by this plugin. The async + // events produced by this plugin are injected in the syscall event source, + // so here we need to parse events coming from the "syscall" source. + // We will select specific events to parse through the + // `get_parse_event_types` API. 
+ std::vector get_parse_event_sources() + { + return PARSE_EVENT_SOURCES; + } + + std::vector get_parse_event_types() + { + return PARSE_EVENT_CODES; + } + + void inline parse_added_modified_resource(nlohmann::json& json_event, + std::string& resource_uid, + std::string& resource_kind); + + void inline parse_deleted_resource(nlohmann::json& json_event, + std::string& resource_uid, + std::string& resource_kind); + + bool inline parse_async_event(const falcosecurity::parse_event_input& in); + + bool inline extract_pod_uid(const falcosecurity::parse_event_input& in); + + bool parse_event(const falcosecurity::parse_event_input& in); + + private: + // Async thread + std::thread m_async_thread; + std::atomic m_async_thread_quit; + std::condition_variable m_cv; + std::mutex m_mu; + + // Init params + std::string m_collector_hostname; + std::string m_collector_port; + std::string m_nodename; + std::string m_ca_PEM_encoding; + + // State tables + std::unordered_map m_pod_table; + std::unordered_map m_namespace_table; + std::unordered_map m_deployment_table; + std::unordered_map m_service_table; + std::unordered_map m_replicaset_table; + std::unordered_map + m_replication_controller_table; + std::unordered_map m_deamonset_table; + + // Last error of the plugin + std::string m_lasterr; + // Accessor to the thread table + falcosecurity::table m_thread_table; + // Accessors to the fixed fields of the thread table + falcosecurity::table_field m_pod_uid_field; +}; + +FALCOSECURITY_PLUGIN(my_plugin); +FALCOSECURITY_PLUGIN_FIELD_EXTRACTION(my_plugin); +FALCOSECURITY_PLUGIN_ASYNC_EVENTS(my_plugin); +FALCOSECURITY_PLUGIN_EVENT_PARSING(my_plugin); diff --git a/plugins/k8smeta/src/plugin_only_consts.h b/plugins/k8smeta/src/plugin_only_consts.h new file mode 100644 index 00000000..ac2a99cc --- /dev/null +++ b/plugins/k8smeta/src/plugin_only_consts.h @@ -0,0 +1,44 @@ +/* +Copyright (C) 2023 The Falco Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#pragma once + +/// todo!: According to perf tests we could compile out some logs +#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_TRACE + +#include +#include +#include + +// Regex to extract the pod uid from cgroups +#define RGX_POD \ + "(pod[a-z0-9]{8}[-_][a-z0-9]{4}[-_][a-z0-9]{4}[-_][a-z0-9]{4}[-_][a-z0-9]" \ + "{12})" + +// Sinsp events used in the plugin +using _et = falcosecurity::event_type; +constexpr auto PPME_ASYNCEVENT_E = (_et)402; +constexpr auto PPME_SYSCALL_CLONE_20_X = (_et)223; +constexpr auto PPME_SYSCALL_CLONE3_X = (_et)335; +constexpr auto PPME_SYSCALL_FORK_20_X = (_et)225; +constexpr auto PPME_SYSCALL_VFORK_20_X = (_et)227; +constexpr auto PPME_SYSCALL_EXECVE_19_X = (_et)293; +constexpr auto PPME_SYSCALL_EXECVEAT_X = (_et)331; + +// Data associated to sinsp events used in the plugin +#define EXECVE_CLONE_RES_PARAM_IDX 0 +#define EXECVE_CLONE_CGROUP_PARAM_IDX 14 diff --git a/plugins/k8smeta/src/shared_with_tests_consts.h b/plugins/k8smeta/src/shared_with_tests_consts.h new file mode 100644 index 00000000..f1fc51dc --- /dev/null +++ b/plugins/k8smeta/src/shared_with_tests_consts.h @@ -0,0 +1,101 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#pragma once + +// This consts file is shared between the plugin and tests + +///////////////////////// +// Async capability +///////////////////////// +#define ASYNC_EVENT_NAME "k8s" +#define ASYNC_EVENT_NAMES \ + { \ + ASYNC_EVENT_NAME \ + } +#define ASYNC_EVENT_SOURCES \ + { \ + "syscall" \ + } + +///////////////////////// +// Extract capability +///////////////////////// +#define EXTRACT_EVENT_NAMES \ + { \ + "" \ + } + +#define EXTRACT_EVENT_SOURCES \ + { \ + "syscall" \ + } + +///////////////////////// +// Parse capability +///////////////////////// +#define PARSE_EVENT_CODES \ + { \ + PPME_ASYNCEVENT_E, PPME_SYSCALL_EXECVE_19_X, PPME_SYSCALL_EXECVEAT_X, \ + PPME_SYSCALL_CLONE_20_X, PPME_SYSCALL_FORK_20_X, \ + PPME_SYSCALL_VFORK_20_X, PPME_SYSCALL_CLONE3_X \ + } + +#define PARSE_EVENT_SOURCES \ + { \ + "syscall" \ + } + +///////////////////////// +// Table fields +///////////////////////// +#define THREAD_TABLE_NAME "threads" +#define POD_UID_FIELD_NAME "pod_uid" + +///////////////////////// +// Proto event reasons +///////////////////////// +#define REASON_CREATE "Create" +#define REASON_UPDATE "Update" +#define REASON_DELETE "Delete" + +///////////////////////// +// Generic plugin consts +///////////////////////// +#define PLUGIN_NAME "k8smeta" +#define PLUGIN_VERSION "0.1.0" +#define PLUGIN_DESCRIPTION \ + "Enrich syscall events with information about the pod that throws them" +#define PLUGIN_CONTACT "github.com/falcosecurity/plugins" +#define PLUGIN_REQUIRED_API_VERSION "3.1.0" + +#define REASON_PATH "/reason" +#define KIND_PATH "/kind" +#define 
UID_PATH "/uid" +#define META_PATH "/meta" +#define NAME_PATH "/name" +#define NAMESPACE_PATH "/namespace" +#define LABELS_PATH "/labels" +#define POD_IP_PATH "/podIP" +#define SPEC_PATH "/spec" +#define STATUS_PATH "/status" +#define REFS_PATH "/refs" +#define VERBOSITY_PATH "/verbosity" +#define HOSTNAME_PATH "/collectorHostname" +#define PORT_PATH "/collectorPort" +#define NODENAME_PATH "/nodename" +#define CA_CERT_PATH "/caPEMBundle" diff --git a/plugins/k8smeta/test/CMakeLists.txt b/plugins/k8smeta/test/CMakeLists.txt new file mode 100644 index 00000000..f3c34c49 --- /dev/null +++ b/plugins/k8smeta/test/CMakeLists.txt @@ -0,0 +1,61 @@ +include(libs) + +# ############################################################################## +# Build test server +# ############################################################################## + +# Set some test paths inside the collector +set(TEST_SERVER_FOLDER "${K8S_METACOLLECTOR_DIR}/test/server") +set(TEST_JSON_FILE "${TEST_SERVER_FOLDER}/test.json") + +add_custom_target(build-server COMMAND cd "${TEST_SERVER_FOLDER}" && go build .) 
+add_custom_target(run-server COMMAND "${TEST_SERVER_FOLDER}/test_server" --file + "${TEST_JSON_FILE}") + +# ############################################################################## +# Build tests +# ############################################################################## + +# Create a build directory out of the tree for libs tests +set(SINSP_TEST_FOLDER "${CMAKE_BINARY_DIR}/libs_tests") +file(MAKE_DIRECTORY "${SINSP_TEST_FOLDER}") + +# Prepare some additional includes for plugin tests +set(TEST_EXTRA_INCLUDES "${CMAKE_BINARY_DIR}/plugin_include/k8smeta_tests") +# Put a file with test macros into the build dir +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/plugin_test_var.h.in" + "${TEST_EXTRA_INCLUDES}/plugin_test_var.h") +# Copy an include shared with the plugin into the build dir +configure_file("${CMAKE_SOURCE_DIR}/src/shared_with_tests_consts.h" + "${TEST_EXTRA_INCLUDES}/shared_with_tests_consts.h" COPYONLY) +# Download nlohmann json single include used in tests +file( + DOWNLOAD + "https://raw.githubusercontent.com/nlohmann/json/v3.10.2/single_include/nlohmann/json.hpp" + "${TEST_EXTRA_INCLUDES}/json.hpp" + EXPECTED_HASH + SHA256=059743e48b37e41579ee3a92e82e984bfa0d2a9a2b20b175d04db8089f46f047) + +# Add some additional test source files +file(GLOB_RECURSE K8S_TEST_SUITE ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +string(REPLACE ";" "\\;" ESCAPED_K8S_TEST_SUITE "${K8S_TEST_SUITE}") + +# Associate the needed includes +list(APPEND K8S_TEST_INCLUDES "${CMAKE_CURRENT_SOURCE_DIR}/include" + "${CMAKE_BINARY_DIR}/plugin_include") +string(REPLACE ";" "\\;" ESCAPED_K8S_TEST_INCLUDES "${K8S_TEST_INCLUDES}") + +add_custom_target( + build-tests + WORKING_DIRECTORY "${SINSP_TEST_FOLDER}" + COMMAND + cmake -S"${LIBS_DIR}" -DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=ON + -DBUILD_LIBSCAP_GVISOR=OFF -DCREATE_TEST_TARGETS=ON -DMINIMAL_BUILD=ON + -DSCAP_FILES_SUITE_ENABLE=OFF + -DADDITIONAL_SINSP_TESTS_SUITE="${ESCAPED_K8S_TEST_SUITE}" + 
-DADDITIONAL_SINSP_TESTS_INCLUDE_FOLDERS="${ESCAPED_K8S_TEST_INCLUDES}" + COMMAND make -C "${SINSP_TEST_FOLDER}" unit-test-libsinsp -j4) + +add_custom_target( + run-tests COMMAND "${SINSP_TEST_FOLDER}/libsinsp/test/unit-test-libsinsp" + --gtest_filter='*plugin_k8s*') diff --git a/plugins/k8smeta/test/README.md b/plugins/k8smeta/test/README.md new file mode 100644 index 00000000..631638a8 --- /dev/null +++ b/plugins/k8smeta/test/README.md @@ -0,0 +1,12 @@ +# Tests with libsinsp + +To run the k8s plugin tests we use the libsinsp framework tests, in this way we can check the compatibility with the plugin and a specific framework version + +## Run tests + +```bash +make build-server +make run-server +make build-tests +make run-tests +``` diff --git a/plugins/k8smeta/test/include/k8smeta_tests/helpers.h b/plugins/k8smeta/test/include/k8smeta_tests/helpers.h new file mode 100644 index 00000000..f0dea73c --- /dev/null +++ b/plugins/k8smeta/test/include/k8smeta_tests/helpers.h @@ -0,0 +1,61 @@ + +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ +#pragma once + +// We can modify the log verbosity here. 
+#define INIT_CONFIG \ + "{\"collectorHostname\":\"localhost\",\"collectorPort\": " \ + "45000,\"nodename\":\"control-plane\",\"verbosity\":" \ + "\"info\"}" + +#define ASSERT_STRING_SETS(a, b) \ + { \ + auto a1 = a; \ + auto b1 = b; \ + EXPECT_EQ(a1.size(), b1.size()); \ + ASSERT_EQ(std::set(a1.begin(), a1.end()), \ + std::set(b1.begin(), b1.end())); \ + } + +#define ASSERT_PPME_SETS(a, b) \ + { \ + auto a1 = a; \ + auto b1 = b; \ + EXPECT_EQ(a1.size(), b1.size()); \ + ASSERT_EQ(std::set(a1.begin(), a1.end()), \ + std::set(b1.begin(), b1.end())); \ + } + +#define ASSERT_PLUGIN_INITIALIZATION(p_o, p_l) \ + { \ + p_o = m_inspector.register_plugin(PLUGIN_PATH); \ + ASSERT_TRUE(p_o.get()); \ + std::string err; \ + ASSERT_TRUE(p_o->init(INIT_CONFIG, err)) << "err: " << err; \ + p_l.add_filter_check(m_inspector.new_generic_filtercheck()); \ + p_l.add_filter_check(sinsp_plugin::new_filtercheck(p_o)); \ + } + +#define GENERATE_EXECVE_EVENT_FOR_INIT(_pod_uid) \ + evt = generate_execve_enter_and_exit_event( \ + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", \ + "/lib/systemd/systemd", \ + {"cpuset=/kubepods/besteffort/pod" + _pod_uid + \ + "/691e0ffb65010b2b611f3a15b7f76c48466192e673e156f38bd2f8e25acd6b" \ + "bc"}); \ + ASSERT_EQ(evt->get_type(), PPME_SYSCALL_EXECVE_19_X); diff --git a/plugins/k8smeta/test/plugin_test_var.h.in b/plugins/k8smeta/test/plugin_test_var.h.in new file mode 100644 index 00000000..65a71e5a --- /dev/null +++ b/plugins/k8smeta/test/plugin_test_var.h.in @@ -0,0 +1,4 @@ +#pragma once + +#define PLUGIN_PATH "${CMAKE_BINARY_DIR}/libk8smeta.so" +#define JSON_TEST_FILE_PATH "${TEST_JSON_FILE}" diff --git a/plugins/k8smeta/test/rules/example_rule.yaml b/plugins/k8smeta/test/rules/example_rule.yaml new file mode 100644 index 00000000..241f0c7a --- /dev/null +++ b/plugins/k8smeta/test/rules/example_rule.yaml @@ -0,0 +1,6 @@ +- rule: Example rule for k8s plugin + desc: Detect execve events into pods + # we want to catch all execve events into a 
pod + condition: evt.type = execve and k8sres.pod.id != "" + output: -> Triggered (pod_name=%k8sres.pod.name pod_id=%k8sres.pod.id pod_ip=%k8sres.pod.ip pod_namespace_name=%k8sres.ns.name pod_deployment_name=%k8sres.deployment.name pod_rs_name=%k8sres.rs.name pod_services_names=%k8sres.svc.name) + priority: WARNING diff --git a/plugins/k8smeta/test/src/check_events.cpp b/plugins/k8smeta/test/src/check_events.cpp new file mode 100644 index 00000000..36e97b25 --- /dev/null +++ b/plugins/k8smeta/test/src/check_events.cpp @@ -0,0 +1,686 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include +#include +#include +#include +#include + +// Obtained from the plugin folder +#include +#include +#include +#include + +class k8s_json_extractor +{ + public: + k8s_json_extractor() + { + std::ifstream json_file(JSON_TEST_FILE_PATH); + + if(!json_file.is_open()) + { + throw std::runtime_error("unable to open the json file: " + + std::string(JSON_TEST_FILE_PATH)); + } + + nlohmann::json parsed_json_file; + json_file >> parsed_json_file; + + m_num_events = 0; + for(auto& elem : parsed_json_file) + { + m_num_events++; + m_json_events.push_back(elem); + } + } + + uint32_t get_num_events() const { return m_num_events; } + + bool compare_json_event(uint32_t evt_idx, + nlohmann::json json_to_compare) const + { + if(m_json_events.size() < evt_idx) + { + throw std::runtime_error("There are not enough events in the json " + "file! 
Event in the json file: '" + + std::to_string(get_num_events()) + + "', index required: '" + + std::to_string(evt_idx) + "'"); + } + return m_json_events[evt_idx] == json_to_compare; + } + + nlohmann::json get_json_event(uint32_t evt_idx) const + { + if(m_json_events.size() < evt_idx) + { + throw std::runtime_error("There are not enough events in the json " + "file! Event in the json file: '" + + std::to_string(get_num_events()) + + "', index required: '" + + std::to_string(evt_idx) + "'"); + } + return m_json_events[evt_idx]; + } + + bool match_json_event(std::string reason, std::string resource_uid, + nlohmann::json event_json) + { + std::string event_uid; + std::string event_reason; + + if(!event_json.contains(nlohmann::json::json_pointer(UID_PATH)) || + !event_json.contains(nlohmann::json::json_pointer(REASON_PATH))) + { + return false; + } + + event_json.at(nlohmann::json::json_pointer(REASON_PATH)) + .get_to(event_reason); + event_json.at(nlohmann::json::json_pointer(UID_PATH)).get_to(event_uid); + + return (event_uid.compare(resource_uid) == 0) && + (event_reason.compare(reason) == 0); + } + + nlohmann::json find_json_resource(std::string resource_uid, + int64_t max_num_event = 0) + { + // If not specified we search until the last event + if(max_num_event == 0) + { + max_num_event = m_num_events; + } + + nlohmann::json target_resource_json; + for(uint32_t i = 0; i < m_num_events; i++) + { + if(match_json_event(REASON_CREATE, resource_uid, + m_json_events[i]) || + match_json_event(REASON_UPDATE, resource_uid, m_json_events[i])) + { + target_resource_json = m_json_events[i]; + } + + if(match_json_event(REASON_DELETE, resource_uid, m_json_events[i])) + { + target_resource_json = ""; + } + } + + if(target_resource_json == "") + { + throw std::runtime_error("Resource with uid '" + resource_uid + + "' not found after " + + std::to_string(max_num_event) + + " events."); + } + return target_resource_json; + } + + void read_all_events(sinsp& m_inspector) + { + int rc 
we don't know how many events will be processed
libsinsp::events::all_event_set()); + ASSERT_STRING_SETS(plugin_owner->extract_event_sources(), + EXTRACT_EVENT_SOURCES); + + auto parse_event_codes = libsinsp::events::set{ + PPME_ASYNCEVENT_E, PPME_SYSCALL_EXECVE_19_X, + PPME_SYSCALL_EXECVEAT_X, PPME_SYSCALL_CLONE_20_X, + PPME_SYSCALL_FORK_20_X, PPME_SYSCALL_VFORK_20_X, + PPME_SYSCALL_CLONE3_X}; + ASSERT_PPME_SETS(plugin_owner->parse_event_codes(), parse_event_codes); + ASSERT_STRING_SETS(plugin_owner->parse_event_sources(), + PARSE_EVENT_SOURCES); + + // The plugin provide a json schema + ss_plugin_schema_type schema_type; + plugin_owner->get_init_schema(schema_type); + ASSERT_EQ(schema_type, SS_PLUGIN_SCHEMA_JSON); +} + +// Check all plugin fields +TEST_F(sinsp_with_test_input, plugin_k8s_fields_existance) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + add_default_init_thread(); + open_inspector(); + + // Obtain an event to assert the filterchecks presence against it. 
+ auto evt = generate_random_event(INIT_TID); + ASSERT_TRUE(field_exists(evt, "k8sres.pod.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.pod.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.pod.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.pod.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.pod.ip", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.ns.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.ns.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.ns.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.ns.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.deployment.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.deployment.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.deployment.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.deployment.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.svc.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.svc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.svc.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.svc.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rs.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rs.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rs.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rs.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rc.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rc.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8sres.rc.labels", pl_flist)); + + // The label field must always have an argument with `[]` notation + ASSERT_THROW(field_exists(evt, "k8sres.pod.label.notexists", pl_flist), + sinsp_exception); + ASSERT_THROW(field_exists(evt, "k8sres.ns.labelnotexists", pl_flist), + sinsp_exception); +} + +// Check that the plugin can send all the 
events received from the server +// without altering them. Assert the number and the json content. +TEST_F(sinsp_with_test_input, plugin_k8s_content_of_async_events) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + open_inspector(); + + sinsp_evt* evt = NULL; + + int rc = 0; + uint32_t num_async_events = 0; + k8s_json_extractor extractor; + while(extractor.get_num_events() != num_async_events) + { + rc = m_inspector.next(&evt); + if(rc == SCAP_SUCCESS && evt != nullptr && + evt->get_type() == PPME_ASYNCEVENT_E) + { + ASSERT_EQ(evt->get_tid(), -1); + ASSERT_EQ(evt->get_source_idx(), 0); + ASSERT_EQ(std::string(evt->get_source_name()), "syscall"); + ASSERT_STREQ(evt->get_param(1)->m_val, "k8s"); + + // Check that the content of the event is right + // We need to compare the json because the dumped strings could be + // out of order + ASSERT_EQ(extractor.get_json_event(num_async_events), + nlohmann::json::parse(evt->get_param(2)->m_val)); + num_async_events++; + } + } + ASSERT_EQ(extractor.get_num_events(), num_async_events); +} + +// Check pod filterchecks value +TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + // Open test inspector + add_default_init_thread(); + open_inspector(); + + k8s_json_extractor extractor; + extractor.read_all_events(m_inspector); + + // Call an execve event on init to set the pod uid. 
+ sinsp_evt* evt = NULL; + std::string pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; + GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); + + // K8S_POD_NAME + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.name", pl_flist), + "metrics-server-85d6fcf458-tqkcv"); + + // K8S_POD_ID + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.id", pl_flist), pod_uid); + + // K8S_POD_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.pod.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.label[k8s-app]", pl_flist), + "metrics-server"); + + // K8S_POD_LABELS + ASSERT_TRUE(field_exists(evt, "k8sres.pod.labels", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.labels", pl_flist), + "(pod-template-hash:85d6fcf458,k8s-app:metrics-server)"); + + // K8S_POD_IP + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.ip", pl_flist), "10.16.1.2"); + + // K8S_NS_NAME + ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.name", pl_flist), + "kube-system"); + + // K8S_NS_ID + ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.id", pl_flist), + "c51d0620-b1e1-449a-a6f2-9f96830831a9"); + + // K8S_NS_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.ns.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string( + evt, "k8sres.ns.label[kubernetes.io/metadata.name]", + pl_flist), + "kube-system"); + + // K8S_NS_LABELS + ASSERT_TRUE(field_exists(evt, "k8sres.ns.labels", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.labels", pl_flist), + "(kubernetes.io/metadata.name:kube-system)"); + + // K8S_DEPLOYMENT_NAME + ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.name", pl_flist), + "metrics-server"); + + // K8S_DEPLOYMENT_ID + ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.id", pl_flist), + "e56cf37d-5b8b-4b2d-b7bc-3316a3d72e93"); + + // K8S_DEPLOYMENT_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.deployment.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.label[k8s-app]", + pl_flist), + "metrics-server"); + + // K8S_DEPLOYMENT_LABELS + 
ASSERT_TRUE(field_exists(evt, "k8sres.deployment.labels", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.labels", pl_flist), + "(k8s-app:metrics-server)"); + + // K8S_SVC_NAME + // This field is a list so we have this `( )` notation + ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.name", pl_flist), + "(metrics-server)"); + + // K8S_SVC_ID + ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.id", pl_flist), + "(b2af0913-1a07-457f-986a-111caa4fb372)"); + + // K8S_SVC_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.svc.label[no]", pl_flist)); + // This field is a list so we have this `( )` notation + ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.label[k8s-app]", pl_flist), + "(metrics-server)"); + + // K8S_SVC_LABELS + ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.labels", pl_flist), + "(k8s-app:metrics-server)"); + + // K8S_RS_NAME + ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.name", pl_flist), + "metrics-server-85d6fcf458"); + + // K8S_RS_ID + ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.id", pl_flist), + "8be7cb9d-f96a-41b5-8fb0-81fda92a663a"); + + // K8S_RS_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.rs.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.label[pod-template-hash]", + pl_flist), + "85d6fcf458"); + + // K8S_RS_LABELS + ASSERT_TRUE(field_exists(evt, "k8sres.rs.labels", pl_flist)); + // This field is a list so we have this `( )` notation + ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.labels", pl_flist), + "(pod-template-hash:85d6fcf458,k8s-app:metrics-server)"); + + // K8S_RC_NAME + ASSERT_TRUE(field_exists(evt, "k8sres.rc.name", pl_flist)); + + // K8S_RC_ID + ASSERT_TRUE(field_exists(evt, "k8sres.rc.id", pl_flist)); + + // K8S_RC_LABEL + ASSERT_TRUE(field_exists(evt, "k8sres.rc.label[no]", pl_flist)); + + // K8S_RC_LABELS + ASSERT_TRUE(field_exists(evt, "k8sres.rc.labels", pl_flist)); + + m_inspector.close(); +} + +// Check pod with 2 services +TEST_F(sinsp_with_test_input, plugin_k8s_pod_with_2_services) +{ 
From now on the pod and the 2 corresponding services are already
The Namespace and Deployment are removed so we shouldn't be able to
obtained from the pod refs + ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.id", pl_flist), + "f7ju8b13-df0c-43bd-8ded-973f4ede66c6"); + // The deployment uid is available because it is obtained from the pod refs + ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.id", pl_flist), + "920r1601-61b6-4d46-8916-db9f36414722"); + + // These resources are removed so we shouldn't have fields + ASSERT_FALSE(field_has_value( + evt, "k8sres.ns.label[kubernetes.io/metadata.name]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); + ASSERT_FALSE( + field_has_value(evt, "k8sres.deployment.label[k8s-app]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); + + m_inspector.close(); +} + +// Delete a pod, all associated resources should be no more available from this +// pod, but they should be available from other pods +TEST_F(sinsp_with_test_input, plugin_k8s_delete_a_pod) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + // Open test inspector + add_default_init_thread(); + open_inspector(); + + k8s_json_extractor extractor; + extractor.read_all_events(m_inspector); + + // This Pod is deleted, all fields should be NULL + sinsp_evt* evt = NULL; + std::string pod_uid = "0cc0927d-1d9f-4798-926b-451364a4fgjs"; + GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); + + ASSERT_FALSE(field_has_value(evt, "k8sres.pod.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.pod.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.pod.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.pod.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.pod.ip", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.ns.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.ns.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, 
"k8sres.ns.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.id", pl_flist)); + ASSERT_FALSE( + field_has_value(evt, "k8sres.deployment.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.svc.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.svc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.svc.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.svc.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rs.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rs.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rs.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rs.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rc.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rc.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8sres.rc.labels", pl_flist)); + + // Now we use a pod that still exists and is associated with the same + // Namespace and Deployment We want to check that the Namespace and the + // Deployment are still there. 
+ pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; + GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); + + ASSERT_TRUE(field_has_value(evt, "k8sres.pod.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.pod.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.pod.labels", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.pod.ip", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.ns.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.ns.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); + + m_inspector.close(); +} + +// Check that after an "Update" event the plugin tables are updated +// We check 2 updates on 2 different pods +TEST_F(sinsp_with_test_input, plugin_k8s_update_a_pod) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + // Open test inspector + add_default_init_thread(); + open_inspector(); + + k8s_json_extractor extractor; + extractor.read_all_events(m_inspector); + + sinsp_evt* evt = NULL; + std::string pod_uid = "1d34c7bb-7d94-4f00-bed9-fe4eca61d446"; + GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); + + // After 2 "Updated" events the pod has 2 services + ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.id", pl_flist), + "(f0fea0cd-24cd-439f-bd51-e7a9100fed40,9e840fbe-93e4-412c-aa23-" + "fbe6d03efd08)"); + + // Check another pod that after an "Updated" event has a pod ip + pod_uid = "e581fe16-cde8-4075-a159-cd8ddd5b8fbc"; + GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); + + // After 2 "Updated" events the pod has 2 services + ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.ip", pl_flist), + "10.16.1.20"); + m_inspector.close(); +} + +//////////////////////////////////// +// Missing tests 
+////////////////////////////////// + +/// todo! Add some tests + +// add a test on a resource without the `/labels` key. + +// Check on a scap file + +// Read a scap-file/huge json file and evaluate perf diff --git a/plugins/k8smeta/test/src/init_config.cpp b/plugins/k8smeta/test/src/init_config.cpp new file mode 100644 index 00000000..57804275 --- /dev/null +++ b/plugins/k8smeta/test/src/init_config.cpp @@ -0,0 +1,94 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include +#include +#include +#include +#include + +// Obtained from the plugin folder +#include +#include +#include +#include + +TEST_F(sinsp_with_test_input, plugin_k8s_empty_init_config) +{ + auto plugin_owner = m_inspector.register_plugin(PLUGIN_PATH); + ASSERT_TRUE(plugin_owner.get()); + std::string err; + + // The plugin requires an init config with a precise schema + ASSERT_THROW(plugin_owner->init("", err), sinsp_exception); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_init_with_missing_required_argument) +{ + auto plugin_owner = m_inspector.register_plugin(PLUGIN_PATH); + ASSERT_TRUE(plugin_owner.get()); + std::string err; + + // The nodename is also a required argument, but here it is not provided + ASSERT_THROW(plugin_owner->init("{\"collectorHostname\":\"localhost\"," + "\"collectorPort\":\"45000\"}", + err), + sinsp_exception); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_init_with_not_allowed_verbosity_value) +{ + auto plugin_owner = m_inspector.register_plugin(PLUGIN_PATH); + ASSERT_TRUE(plugin_owner.get()); + std::string err; + + // `warn` is not a valid value for the `verbosity` field + ASSERT_THROW(plugin_owner->init("{\"collectorHostname\":\"localhost\"," + "\"collectorPort\":\"45000\",\"nodename\":" + "\"control-plane\",\"verbosity\":\"warn\"}", + err), + sinsp_exception); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_with_simple_config) +{ + auto plugin_owner = m_inspector.register_plugin(PLUGIN_PATH); + ASSERT_TRUE(plugin_owner.get()); + std::string err; + + ASSERT_NO_THROW(plugin_owner->init(R"( +{"collectorHostname":"localhost","collectorPort":45000,"nodename":"kind-control-plane"})", + err)); + ASSERT_EQ(err, ""); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_env_variable) +{ + auto plugin_owner = m_inspector.register_plugin(PLUGIN_PATH); + ASSERT_TRUE(plugin_owner.get()); + std::string err; + + std::string env_var_name = "FALCO_NODE_NAME"; + std::string env_var_value = "kind_control_plane"; + + setenv(env_var_name.c_str(), 
env_var_value.c_str(), 1); + + ASSERT_NO_THROW(plugin_owner->init(R"( +{"collectorHostname":"localhost","collectorPort":45000,"nodename":" ${FALCO_NODE_NAME} "})", + err)); + ASSERT_EQ(err, ""); +} diff --git a/plugins/k8smeta/test/src/parsing_pod.cpp b/plugins/k8smeta/test/src/parsing_pod.cpp new file mode 100644 index 00000000..7b160ef1 --- /dev/null +++ b/plugins/k8smeta/test/src/parsing_pod.cpp @@ -0,0 +1,336 @@ +/* +Copyright (C) 2023 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include +#include +#include + +// Obtained from the plugin folder +#include +#include +#include + +// All tests regarding the pod parsing logic and addition/removal of the pod +// field to the thread table. 
+#define CLONE_FORK_TEST(event) \ + std::shared_ptr plugin_owner; \ + filter_check_list pl_flist; \ + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) \ + add_default_init_thread(); \ + open_inspector(); \ + \ + auto ® = m_inspector.get_table_registry(); \ + auto thread_table = reg->get_table(THREAD_TABLE_NAME); \ + auto dynamic_fields = thread_table->dynamic_fields(); \ + auto field = dynamic_fields->fields().find(POD_UID_FIELD_NAME); \ + auto fieldacc = field->second.new_accessor(); \ + \ + int64_t p1_tid = 2; \ + int64_t p1_pid = 2; \ + int64_t p1_ptid = INIT_TID; \ + int64_t p1_vtid = 1; \ + int64_t p1_vpid = 1; \ + \ + std::string expected_pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; \ + auto evt = generate_clone_x_event( \ + 0, p1_tid, p1_pid, p1_ptid, PPM_CL_CHILD_IN_PIDNS, p1_vtid, \ + p1_vpid, "bash", \ + {"cpuset=/kubepods/besteffort/pod" + expected_pod_uid + \ + "/691e0ffb65010b2b611f3a15b7f76c48466192e673e156f38bd2f8e25acd6b" \ + "bc"}, \ + event); \ + ASSERT_EQ(evt->get_type(), event); \ + \ + auto init_thread_entry = thread_table->get_entry(p1_tid); \ + ASSERT_NE(init_thread_entry, nullptr); \ + std::string pod_uid; \ + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); \ + ASSERT_EQ(pod_uid, expected_pod_uid); + +#define EXECVE_EXECVEAT_TEST(event) \ + std::shared_ptr plugin_owner; \ + filter_check_list pl_flist; \ + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) \ + \ + add_default_init_thread(); \ + open_inspector(); \ + \ + auto ® = m_inspector.get_table_registry(); \ + auto thread_table = reg->get_table(THREAD_TABLE_NAME); \ + auto dynamic_fields = thread_table->dynamic_fields(); \ + auto field = dynamic_fields->fields().find(POD_UID_FIELD_NAME); \ + auto fieldacc = field->second.new_accessor(); \ + \ + uint64_t not_relevant_64 = 0; \ + uint32_t not_relevant_32 = 0; \ + \ + std::string expected_pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; \ + std::vector cgroups1 = {"cpuset=/kubepods/besteffort/pod" + \ + 
expected_pod_uid + \ + "/691e0ffb65010b2b611f3a15b7f76c4846" \ + "6192e673e156f38bd2f8e25acd6bbc"}; \ + std::string cgroupsv = test_utils::to_null_delimited(cgroups1); \ + scap_const_sized_buffer empty_bytebuf = {/*.buf =*/nullptr, /*.size =*/0}; \ + \ + auto evt = add_event_advance_ts( \ + increasing_ts(), INIT_TID, event, 27, (int64_t)0, "/bin/test-exe", \ + empty_bytebuf, INIT_TID, INIT_PID, INIT_PTID, "", not_relevant_64, \ + not_relevant_64, not_relevant_64, not_relevant_32, \ + not_relevant_32, not_relevant_32, "test-exe", \ + scap_const_sized_buffer{cgroupsv.data(), cgroupsv.size()}, \ + empty_bytebuf, not_relevant_32, not_relevant_64, not_relevant_32, \ + (int32_t)PPM_EXE_WRITABLE, not_relevant_64, not_relevant_64, \ + not_relevant_64, not_relevant_64, not_relevant_64, \ + not_relevant_64, not_relevant_32); \ + ASSERT_EQ(evt->get_type(), event); \ + \ + auto init_thread_entry = thread_table->get_entry(INIT_TID); \ + ASSERT_NE(init_thread_entry, nullptr); \ + std::string pod_uid; \ + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); \ + ASSERT_EQ(pod_uid, expected_pod_uid); + +// Check pod regex with different formats +TEST_F(sinsp_with_test_input, plugin_k8s_pod_uid_regex) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + add_default_init_thread(); + open_inspector(); + + auto ® = m_inspector.get_table_registry(); + auto thread_table = reg->get_table(THREAD_TABLE_NAME); + auto field = + thread_table->dynamic_fields()->fields().find(POD_UID_FIELD_NAME); + auto fieldacc = field->second.new_accessor(); + auto init_thread_entry = thread_table->get_entry(INIT_TID); + ASSERT_NE(init_thread_entry, nullptr); + std::string pod_uid = ""; + + // CgroupV1, driver cgroup + std::string expected_pod_uid = "05869489-8c7f-45dc-9abd-1b1620787bb1"; + generate_execve_enter_and_exit_event( + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", + "/lib/systemd/systemd", + 
{"cpuset=/kubepods/besteffort/pod" + expected_pod_uid + + "/691e0ffb65010b2b611f3a15b7f76c48466192e673e156f38bd2f8e25acd6bb" + "c"}); + + // Check that the pod uid is updated after the first execve + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + ASSERT_EQ(pod_uid, expected_pod_uid); + + // CgroupV1, driver systemd + // systemd has this format with `_` instead of `-` + expected_pod_uid = "0f90f31c_ebeb_4192_a2b0_92e076c43817"; + generate_execve_enter_and_exit_event( + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", + "/lib/systemd/systemd", + {"cpuset=/kubepods.slice/kubepods-besteffort.slice/" + "kubepods-besteffort-pod" + + expected_pod_uid + + ".slice/" + "4c97d83b89df14eea65dbbab1f506b405758341616ab75437d66fd8bab0e2be" + "b"}); + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + std::replace(expected_pod_uid.begin(), expected_pod_uid.end(), '_', '-'); + ASSERT_EQ(pod_uid, expected_pod_uid); + + // CgroupV2, driver cgroup + expected_pod_uid = "af4fa4cf-129e-4699-a2af-65548fb8977d"; + generate_execve_enter_and_exit_event( + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", + "/lib/systemd/systemd", + {"cpuset=/kubepods/besteffort/pod" + expected_pod_uid + + "/fc16540dcd776bb475437b722c47de798fa1b07687db1ba7d4609c23d5d1a08" + "8"}); + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + ASSERT_EQ(pod_uid, expected_pod_uid); + + // CgroupV2, driver systemd + expected_pod_uid = "43f23404_e33c_48c7_8114_28ee4b7043ec"; + generate_execve_enter_and_exit_event( + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", + "/lib/systemd/systemd", + {"cpuset=/kubepods.slice/kubepods-besteffort.slice/" + "kubepods-besteffort-pod" + + expected_pod_uid + + ".slice/" + "cri-containerd-" + "b59ce319955234d0b051a93dac5efa8fc07df08d8b0188195b434174efc44e73." 
+ "scope"}); + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + std::replace(expected_pod_uid.begin(), expected_pod_uid.end(), '_', '-'); + ASSERT_EQ(pod_uid, expected_pod_uid); + + // Not match, wrong pod_uid format + // Use a cgroup with a wrong pod_uid + generate_execve_enter_and_exit_event( + 0, INIT_TID, INIT_TID, INIT_PID, INIT_PTID, "init", "init", + "/lib/systemd/systemd", + {"cpuset=/kubepods.slice/kubepods-besteffort.slice/" + "kubepods-besteffort-pod438943***343r2e-fsdwed-32ewad-e2dw-2." + "slice/" + "cri-containerd-" + "b59ce319955234d0b051a93dac5efa8fc07df08d8b0188195b434174efc44e73." + "scope"}); + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + // We are not able to extract something valid from the cgroup so we set the + // pod_uid to `""` in the plugin + ASSERT_EQ(pod_uid, ""); +} + +// Check that the plugin defines a new field called "pod_uid" in the `init` +// plugin. +TEST_F(sinsp_with_test_input, plugin_k8s_pod_uid_field_existance) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + add_default_init_thread(); + open_inspector(); + + // Check that the field is defined by the plugin in the init API + auto ® = m_inspector.get_table_registry(); + ASSERT_EQ(reg->tables().size(), 1); + ASSERT_NE(reg->tables().find(THREAD_TABLE_NAME), reg->tables().end()); + auto thread_table = reg->get_table(THREAD_TABLE_NAME); + auto field = + thread_table->dynamic_fields()->fields().find(POD_UID_FIELD_NAME); + ASSERT_NE(field, thread_table->dynamic_fields()->fields().end()); + ASSERT_EQ(field->second.name(), POD_UID_FIELD_NAME); + ASSERT_EQ(field->second.info(), + libsinsp::state::typeinfo::of()); + + // Try to access this field for the init thread, the value should be empty + // since the plugin doesn't populate it! 
+ auto fieldacc = field->second.new_accessor(); + auto init_thread_entry = thread_table->get_entry(INIT_TID); + ASSERT_NE(init_thread_entry, nullptr); + std::string pod_uid; + init_thread_entry->get_dynamic_field(fieldacc, pod_uid); + ASSERT_EQ(pod_uid, ""); +} + +// Check that clone/fork events are correctly parsed into the plugin and the +// pod_uid is populated for the new thread! +TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_CLONE_20_X_parse) +{ + CLONE_FORK_TEST(PPME_SYSCALL_CLONE_20_X); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_FORK_20_X_parse) +{ + CLONE_FORK_TEST(PPME_SYSCALL_FORK_20_X); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_VFORK_20_X_parse) +{ + CLONE_FORK_TEST(PPME_SYSCALL_VFORK_20_X); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_CLONE3_X_parse) +{ + CLONE_FORK_TEST(PPME_SYSCALL_CLONE3_X); +} + +// Check that execve/execveat events are correctly parsed into the plugin and +// the pod_uid is populated for the new thread! 
+TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_EXECVE_19_X_parse) +{ + EXECVE_EXECVEAT_TEST(PPME_SYSCALL_EXECVE_19_X); +} + +TEST_F(sinsp_with_test_input, plugin_k8s_PPME_SYSCALL_EXECVEAT_X_parse) +{ + EXECVE_EXECVEAT_TEST(PPME_SYSCALL_EXECVEAT_X); +} + +// Check that the pod_uid is correctly overwritten with an execve after a clone +TEST_F(sinsp_with_test_input, plugin_k8s_execve_after_clone_event) +{ + std::shared_ptr plugin_owner; + filter_check_list pl_flist; + ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist) + + add_default_init_thread(); + open_inspector(); + + auto ® = m_inspector.get_table_registry(); + auto thread_table = reg->get_table(THREAD_TABLE_NAME); + auto field = + thread_table->dynamic_fields()->fields().find(POD_UID_FIELD_NAME); + auto fieldacc = field->second.new_accessor(); + + int64_t p1_tid = 2; + int64_t p1_pid = 2; + int64_t p1_ptid = INIT_TID; + int64_t p1_vtid = 1; + int64_t p1_vpid = 1; + + // Populate the pod_uid with a fist clone event + std::string expected_pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; + generate_clone_x_event(0, p1_tid, p1_pid, p1_ptid, PPM_CL_CHILD_IN_PIDNS, + p1_vtid, p1_vpid, "bash", + {"cpuset=/kubepods/besteffort/pod" + + expected_pod_uid + + "/691e0ffb65010b2b611f3a15b7f76c48466192e673e156f38" + "bd2f8e25acd6bbc"}, + PPME_SYSCALL_CLONE_20_X); + + auto p1_thread_entry = thread_table->get_entry(p1_tid); + ASSERT_NE(p1_thread_entry, nullptr); + std::string pod_uid = ""; + p1_thread_entry->get_dynamic_field(fieldacc, pod_uid); + ASSERT_EQ(pod_uid, expected_pod_uid); + + // Re-Populate the pod_uid with a following execve event + expected_pod_uid = "05869489-8c7f-45dc-9abd-1b1620787bb1"; + generate_execve_enter_and_exit_event( + 0, p1_tid, p1_tid, p1_pid, p1_ptid, "bash", "bash", "/usr/bin/bash", + {"cpuset=/kubepods/besteffort/pod" + expected_pod_uid + + "/691e0ffb65010b2b611f3a15b7f76c48466192e673e156f38bd2f8e25acd6bb" + "c"}); + + // Check that the pod uid is updated after the first execve + 
p1_thread_entry->get_dynamic_field(fieldacc, pod_uid); + ASSERT_EQ(pod_uid, expected_pod_uid); +} + +// Check if the thread entry is correctly removed after it is populated by the +// plugin +TEST_F(sinsp_with_test_input, plugin_k8s_check_thread_entry_is_removed) +{ + // Create a new child `p1_tid` + CLONE_FORK_TEST(PPME_SYSCALL_CLONE_20_X); + + // Check that now we have 2 entries in the thread table + ASSERT_EQ(thread_table->entries_count(), 2); + auto p1_tid_tinfo = m_inspector.get_thread_ref(p1_tid, false).get(); + ASSERT_TRUE(p1_tid_tinfo); + + // Call a proc_exit and see if the thread is removed from the thread table + remove_thread(p1_tid, INIT_TID); + p1_tid_tinfo = m_inspector.get_thread_ref(p1_tid, false).get(); + ASSERT_FALSE(p1_tid_tinfo); + + // Now we should have only one entry in the thread table + ASSERT_EQ(thread_table->entries_count(), 1); +} From 982a5b3f9a5e1ad880e3e8a3ee10b3a31b6a8126 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Mon, 11 Dec 2023 19:35:12 +0100 Subject: [PATCH 2/6] chore(CI): adapt CI to build `k8smeta` plugin Signed-off-by: Andrea Terzolo --- .github/workflows/reusable_build_packages.yaml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/reusable_build_packages.yaml b/.github/workflows/reusable_build_packages.yaml index 21df9ba4..8e0b9980 100644 --- a/.github/workflows/reusable_build_packages.yaml +++ b/.github/workflows/reusable_build_packages.yaml @@ -23,8 +23,16 @@ jobs: - name: Install deps run: | apt update - apt install -y --no-install-recommends git awscli - + apt install -y --no-install-recommends git awscli make build-essential autoconf libtool pkg-config + + - name: Install updated cmake version ⛓️ + run: | + curl -L -o /tmp/cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-$(uname -m).tar.gz + gzip -d /tmp/cmake.tar.gz + tar -xpf /tmp/cmake.tar --directory=/tmp + cp -R /tmp/cmake-3.22.5-linux-$(uname -m)/* /usr + rm -rf 
/tmp/cmake-3.22.5-linux-$(uname -m) + - name: Checkout Plugins ⤵️ uses: actions/checkout@v3 with: From 5e3b90e497dfc4e1060b5eeb665bd0f2013929a1 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Mon, 11 Dec 2023 19:35:29 +0100 Subject: [PATCH 3/6] new(CI): add a new workflow for `k8smeta` plugin Signed-off-by: Andrea Terzolo --- .github/workflows/k8smeta-ci.yaml | 72 +++++++++++++++++++++++++++++++ plugins/k8smeta/falco.yaml | 16 +++++++ 2 files changed, 88 insertions(+) create mode 100644 .github/workflows/k8smeta-ci.yaml create mode 100644 plugins/k8smeta/falco.yaml diff --git a/.github/workflows/k8smeta-ci.yaml b/.github/workflows/k8smeta-ci.yaml new file mode 100644 index 00000000..ce0d7e38 --- /dev/null +++ b/.github/workflows/k8smeta-ci.yaml @@ -0,0 +1,72 @@ +name: Build K8smeta plugin +on: + pull_request: + branches: [ master ] + paths: + - 'plugins/k8smeta/**' + push: + branches: [ master ] + paths: + - 'plugins/k8smeta/**' + workflow_dispatch: + +# Checks if any concurrent jobs under the same pull request or branch are being executed +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + build-and-test: + name: build-and-test + runs-on: ubuntu-22.04 + steps: + - name: Checkout ⤵️ + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + with: + go-version: '1.21' + check-latest: true + + - name: Install deps ⛓️ + run: | + sudo apt update -y + sudo apt install -y --no-install-recommends cmake build-essential autoconf libtool pkg-config + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: cpp + + - name: Build k8s meta plugin 🏗️ + run: | + cd plugins/k8smeta + mkdir build + cd build && cmake -DCMAKE_BUILD_TYPE=Release ../ + make k8smeta -j6 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + 
+ - name: Build and run tests 🏎️ + run: | + cd plugins/k8smeta/build + make build-server + make build-tests + make run-server & + make run-tests + + formatting-check: + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Run clang-format style check + uses: jidicula/clang-format-action@f62da5e3d3a2d88ff364771d9d938773a618ab5e #v4.11.0 + with: + clang-format-version: '14' + check-path: plugins/k8smeta diff --git a/plugins/k8smeta/falco.yaml b/plugins/k8smeta/falco.yaml new file mode 100644 index 00000000..7946b9a8 --- /dev/null +++ b/plugins/k8smeta/falco.yaml @@ -0,0 +1,16 @@ +# Base Falco yaml to be used to validate the plugin + +load_plugins: [k8smeta] + +# We set the verbosity to `critical` to avoid logs during the plugin validation in CI +plugins: + - name: k8smeta + library_path: libk8smeta.so + init_config: + collectorPort: 45000 + collectorHostname: localhost + nodename: kind-control-plane + verbosity: critical + +stdout_output: + enabled: true From 0a1bae8d6e69d92449e60bab8a1d13c11647a815 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Fri, 15 Dec 2023 16:45:09 +0100 Subject: [PATCH 4/6] cleanup(k8smeta): rename `k8sres.*` into `k8smeta.*` Signed-off-by: Andrea Terzolo Co-authored-by: Melissa Kilby --- plugins/k8smeta/README.md | 50 ++-- plugins/k8smeta/src/plugin.cpp | 62 ++--- plugins/k8smeta/test/rules/example_rule.yaml | 4 +- plugins/k8smeta/test/src/check_events.cpp | 240 +++++++++---------- 4 files changed, 178 insertions(+), 178 deletions(-) diff --git a/plugins/k8smeta/README.md b/plugins/k8smeta/README.md index 9186193b..e1bd6f13 100644 --- a/plugins/k8smeta/README.md +++ b/plugins/k8smeta/README.md @@ -21,31 +21,31 @@ The `k8smeta` plugin implements these capabilities: | NAME | TYPE | ARG | DESCRIPTION | 
|----------------------------|-----------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `k8sres.pod.name` | `string` | None | Kubernetes pod name. | -| `k8sres.pod.id` | `string` | None | Kubernetes pod ID. | -| `k8sres.pod.label` | `string` | Key, Required | Kubernetes pod label. E.g. 'k8sres.pod.label[foo]'. | -| `k8sres.pod.labels` | `string (list)` | None | Kubernetes pod comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8sres.pod.ip` | `string` | None | Kubernetes pod ip | -| `k8sres.ns.name` | `string` | None | Kubernetes namespace name. | -| `k8sres.ns.id` | `string` | None | Kubernetes namespace ID. | -| `k8sres.ns.label` | `string` | Key, Required | Kubernetes namespace label. E.g. 'k8sres.ns.label[foo]'. | -| `k8sres.ns.labels` | `string (list)` | None | Kubernetes namespace comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8sres.deployment.name` | `string` | None | Kubernetes deployment name. | -| `k8sres.deployment.id` | `string` | None | Kubernetes deployment ID. | -| `k8sres.deployment.label` | `string` | Key, Required | Kubernetes deployment label. E.g. 'k8sres.rs.label[foo]'. | -| `k8sres.deployment.labels` | `string (list)` | None | Kubernetes deployment comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8sres.svc.name` | `string (list)` | None | Kubernetes services name. Return a list with all the names of the services associated with the current pod. E.g. '(service1,service2)' | -| `k8sres.svc.id` | `string (list)` | None | Kubernetes services ID. Return a list with all the IDs of the services associated with the current pod. E.g. 
'(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-25307c5007e8)' | -| `k8sres.svc.label` | `string (list)` | Key, Required | Kubernetes services label. If the services associated with the current pod have a label with this name, return the list of label's values. E.g. if the current pod has 2 services associated and both have the 'foo' label, 'k8sres.svc.label[foo]' will return '(service1-label-value,service2-label-value) | -| `k8sres.svc.labels` | `string (list)` | None | Kubernetes services labels. Return a list with all the comma-separated key/value labels of the services associated with the current pod. E.g. '(foo1:bar1,foo2:bar2)' | -| `k8sres.rs.name` | `string` | None | Kubernetes replica set name. | -| `k8sres.rs.id` | `string` | None | Kubernetes replica set ID. | -| `k8sres.rs.label` | `string` | Key, Required | Kubernetes replica set label. E.g. 'k8sres.rs.label[foo]'. | -| `k8sres.rs.labels` | `string (list)` | None | Kubernetes replica set comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8sres.rc.name` | `string` | None | Kubernetes replication controller name. | -| `k8sres.rc.id` | `string` | None | Kubernetes replication controller ID. | -| `k8sres.rc.label` | `string` | Key, Required | Kubernetes replication controller label. E.g. 'k8sres.rc.label[foo]'. | -| `k8sres.rc.labels` | `string (list)` | None | Kubernetes replication controller comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.pod.name` | `string` | None | Kubernetes pod name. | +| `k8smeta.pod.id` | `string` | None | Kubernetes pod ID. | +| `k8smeta.pod.label` | `string` | Key, Required | Kubernetes pod label. E.g. 'k8smeta.pod.label[foo]'. | +| `k8smeta.pod.labels` | `string (list)` | None | Kubernetes pod comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.pod.ip` | `string` | None | Kubernetes pod ip | +| `k8smeta.ns.name` | `string` | None | Kubernetes namespace name. 
| +| `k8smeta.ns.id` | `string` | None | Kubernetes namespace ID. | +| `k8smeta.ns.label` | `string` | Key, Required | Kubernetes namespace label. E.g. 'k8smeta.ns.label[foo]'. | +| `k8smeta.ns.labels` | `string (list)` | None | Kubernetes namespace comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.deployment.name` | `string` | None | Kubernetes deployment name. | +| `k8smeta.deployment.id` | `string` | None | Kubernetes deployment ID. | +| `k8smeta.deployment.label` | `string` | Key, Required | Kubernetes deployment label. E.g. 'k8smeta.rs.label[foo]'. | +| `k8smeta.deployment.labels` | `string (list)` | None | Kubernetes deployment comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.svc.name` | `string (list)` | None | Kubernetes services name. Return a list with all the names of the services associated with the current pod. E.g. '(service1,service2)' | +| `k8smeta.svc.id` | `string (list)` | None | Kubernetes services ID. Return a list with all the IDs of the services associated with the current pod. E.g. '(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-25307c5007e8)' | +| `k8smeta.svc.label` | `string (list)` | Key, Required | Kubernetes services label. If the services associated with the current pod have a label with this name, return the list of label's values. E.g. if the current pod has 2 services associated and both have the 'foo' label, 'k8smeta.svc.label[foo]' will return '(service1-label-value,service2-label-value) | +| `k8smeta.svc.labels` | `string (list)` | None | Kubernetes services labels. Return a list with all the comma-separated key/value labels of the services associated with the current pod. E.g. '(foo1:bar1,foo2:bar2)' | +| `k8smeta.rs.name` | `string` | None | Kubernetes replica set name. | +| `k8smeta.rs.id` | `string` | None | Kubernetes replica set ID. | +| `k8smeta.rs.label` | `string` | Key, Required | Kubernetes replica set label. E.g. 'k8smeta.rs.label[foo]'. 
| +| `k8smeta.rs.labels` | `string (list)` | None | Kubernetes replica set comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.rc.name` | `string` | None | Kubernetes replication controller name. | +| `k8smeta.rc.id` | `string` | None | Kubernetes replication controller ID. | +| `k8smeta.rc.label` | `string` | Key, Required | Kubernetes replication controller label. E.g. 'k8smeta.rc.label[foo]'. | +| `k8smeta.rc.labels` | `string (list)` | None | Kubernetes replication controller comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | ## Usage diff --git a/plugins/k8smeta/src/plugin.cpp b/plugins/k8smeta/src/plugin.cpp index da416791..79bb1824 100644 --- a/plugins/k8smeta/src/plugin.cpp +++ b/plugins/k8smeta/src/plugin.cpp @@ -335,54 +335,54 @@ std::vector my_plugin::get_fields() using ft = falcosecurity::field_value_type; // Use an array to perform a static_assert one the size. const falcosecurity::field_info fields[] = { - {ft::FTYPE_STRING, "k8sres.pod.name", "Pod Name", + {ft::FTYPE_STRING, "k8smeta.pod.name", "Pod Name", "Kubernetes pod name."}, - {ft::FTYPE_STRING, "k8sres.pod.id", "Pod ID", "Kubernetes pod ID."}, + {ft::FTYPE_STRING, "k8smeta.pod.id", "Pod ID", "Kubernetes pod ID."}, {ft::FTYPE_STRING, - "k8sres.pod.label", + "k8smeta.pod.label", "Pod Label", - "Kubernetes pod label. E.g. 'k8sres.pod.label[foo]'.", + "Kubernetes pod label. E.g. 'k8smeta.pod.label[foo]'.", {.key = true, .required = true}}, - {ft::FTYPE_STRING, "k8sres.pod.labels", "Pod Labels", + {ft::FTYPE_STRING, "k8smeta.pod.labels", "Pod Labels", "Kubernetes pod comma-separated key/value labels. E.g. 
" "'(foo1:bar1,foo2:bar2)'.", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.pod.ip", "Pod Ip", "Kubernetes pod ip"}, + {ft::FTYPE_STRING, "k8smeta.pod.ip", "Pod Ip", "Kubernetes pod ip"}, - {ft::FTYPE_STRING, "k8sres.ns.name", "Namespace Name", + {ft::FTYPE_STRING, "k8smeta.ns.name", "Namespace Name", "Kubernetes namespace name."}, - {ft::FTYPE_STRING, "k8sres.ns.id", "Namespace ID", + {ft::FTYPE_STRING, "k8smeta.ns.id", "Namespace ID", "Kubernetes namespace ID."}, {ft::FTYPE_STRING, - "k8sres.ns.label", + "k8smeta.ns.label", "Namespace Label", - "Kubernetes namespace label. E.g. 'k8sres.ns.label[foo]'.", + "Kubernetes namespace label. E.g. 'k8smeta.ns.label[foo]'.", {.key = true, .index = false, .required = true}}, - {ft::FTYPE_STRING, "k8sres.ns.labels", "Namespace Labels", + {ft::FTYPE_STRING, "k8smeta.ns.labels", "Namespace Labels", "Kubernetes namespace comma-separated key/value labels. E.g. " "'(foo1:bar1,foo2:bar2)'.", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.deployment.name", "Deployment Name", + {ft::FTYPE_STRING, "k8smeta.deployment.name", "Deployment Name", "Kubernetes deployment name."}, - {ft::FTYPE_STRING, "k8sres.deployment.id", "Deployment ID", + {ft::FTYPE_STRING, "k8smeta.deployment.id", "Deployment ID", "Kubernetes deployment ID."}, {ft::FTYPE_STRING, - "k8sres.deployment.label", + "k8smeta.deployment.label", "Deployment Label", - "Kubernetes deployment label. E.g. 'k8sres.rs.label[foo]'.", + "Kubernetes deployment label. E.g. 'k8smeta.rs.label[foo]'.", {.key = true, .required = true}}, - {ft::FTYPE_STRING, "k8sres.deployment.labels", "Deployment Labels", + {ft::FTYPE_STRING, "k8smeta.deployment.labels", "Deployment Labels", "Kubernetes deployment comma-separated key/value labels. E.g. " "'(foo1:bar1,foo2:bar2)'.", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.svc.name", "Services Name", + {ft::FTYPE_STRING, "k8smeta.svc.name", "Services Name", "Kubernetes services name. 
Return a list with all the names of " "the services associated with the " "current pod. E.g. '(service1,service2)'", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.svc.id", "Services ID", + {ft::FTYPE_STRING, "k8smeta.svc.id", "Services ID", "Kubernetes services ID. Return a list with all the IDs of the " "services associated with the " "current pod. E.g. " @@ -390,48 +390,48 @@ std::vector my_plugin::get_fields() "25307c5007e8)'", falcosecurity::field_arg(), true}, {ft::FTYPE_STRING, - "k8sres.svc.label", + "k8smeta.svc.label", "Services Label", "Kubernetes services label. If the services associated with the " "current pod have a label with this " "name, return the list of label's values. E.g. if the current pod " "has 2 services associated and both " - "have the 'foo' label, 'k8sres.svc.label[foo]' will return " + "have the 'foo' label, 'k8smeta.svc.label[foo]' will return " "'(service1-label-value,service2-label-value)", {.key = true, .required = true}, true}, - {ft::FTYPE_STRING, "k8sres.svc.labels", "Services Labels", + {ft::FTYPE_STRING, "k8smeta.svc.labels", "Services Labels", "Kubernetes services labels. Return a list with all the " "comma-separated key/value labels of the " "services associated with the current pod. E.g. " "'(foo1:bar1,foo2:bar2)'", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.rs.name", "Replica Set Name", + {ft::FTYPE_STRING, "k8smeta.rs.name", "Replica Set Name", "Kubernetes replica set name."}, - {ft::FTYPE_STRING, "k8sres.rs.id", "Replica Set ID", + {ft::FTYPE_STRING, "k8smeta.rs.id", "Replica Set ID", "Kubernetes replica set ID."}, {ft::FTYPE_STRING, - "k8sres.rs.label", + "k8smeta.rs.label", "Replica Set Label", - "Kubernetes replica set label. E.g. 'k8sres.rs.label[foo]'.", + "Kubernetes replica set label. E.g. 
'k8smeta.rs.label[foo]'.", {.key = true, .required = true}}, - {ft::FTYPE_STRING, "k8sres.rs.labels", "Replica Set Labels", + {ft::FTYPE_STRING, "k8smeta.rs.labels", "Replica Set Labels", "Kubernetes replica set comma-separated key/value labels. E.g. " "'(foo1:bar1,foo2:bar2)'.", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8sres.rc.name", "Replication Controller Name", + {ft::FTYPE_STRING, "k8smeta.rc.name", "Replication Controller Name", "Kubernetes replication controller name."}, - {ft::FTYPE_STRING, "k8sres.rc.id", "Replication Controller ID", + {ft::FTYPE_STRING, "k8smeta.rc.id", "Replication Controller ID", "Kubernetes replication controller ID."}, {ft::FTYPE_STRING, - "k8sres.rc.label", + "k8smeta.rc.label", "Replication Controller Label", "Kubernetes replication controller label. E.g. " - "'k8sres.rc.label[foo]'.", + "'k8smeta.rc.label[foo]'.", {.key = true, .required = true}}, - {ft::FTYPE_STRING, "k8sres.rc.labels", + {ft::FTYPE_STRING, "k8smeta.rc.labels", "Replication Controller Labels", "Kubernetes replication controller comma-separated key/value " "labels. E.g. 
'(foo1:bar1,foo2:bar2)'.", diff --git a/plugins/k8smeta/test/rules/example_rule.yaml b/plugins/k8smeta/test/rules/example_rule.yaml index 241f0c7a..2329d0b6 100644 --- a/plugins/k8smeta/test/rules/example_rule.yaml +++ b/plugins/k8smeta/test/rules/example_rule.yaml @@ -1,6 +1,6 @@ - rule: Example rule for k8s plugin desc: Detect execve events into pods # we want to catch all execve events into a pod - condition: evt.type = execve and k8sres.pod.id != "" - output: -> Triggered (pod_name=%k8sres.pod.name pod_id=%k8sres.pod.id pod_ip=%k8sres.pod.ip pod_namespace_name=%k8sres.ns.name pod_deployment_name=%k8sres.deployment.name pod_rs_name=%k8sres.rs.name pod_services_names=%k8sres.svc.name) + condition: evt.type = execve and k8smeta.pod.id != "" + output: -> Triggered (pod_name=%k8smeta.pod.name pod_id=%k8smeta.pod.id pod_ip=%k8smeta.pod.ip pod_namespace_name=%k8smeta.ns.name pod_deployment_name=%k8smeta.deployment.name pod_rs_name=%k8smeta.rs.name pod_services_names=%k8smeta.svc.name) priority: WARNING diff --git a/plugins/k8smeta/test/src/check_events.cpp b/plugins/k8smeta/test/src/check_events.cpp index 36e97b25..6e4847f1 100644 --- a/plugins/k8smeta/test/src/check_events.cpp +++ b/plugins/k8smeta/test/src/check_events.cpp @@ -228,36 +228,36 @@ TEST_F(sinsp_with_test_input, plugin_k8s_fields_existance) // Obtain an event to assert the filterchecks presence against it. 
auto evt = generate_random_event(INIT_TID); - ASSERT_TRUE(field_exists(evt, "k8sres.pod.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.pod.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.pod.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.pod.labels", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.pod.ip", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.ns.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.ns.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.ns.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.ns.labels", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.labels", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.svc.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.svc.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.svc.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.svc.labels", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rs.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rs.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rs.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rs.labels", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rc.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rc.id", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rc.label[exists]", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8sres.rc.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, 
"k8smeta.pod.ip", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.label[exists]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.labels", pl_flist)); // The label field must always have an argument with `[]` notation - ASSERT_THROW(field_exists(evt, "k8sres.pod.label.notexists", pl_flist), + ASSERT_THROW(field_exists(evt, "k8smeta.pod.label.notexists", pl_flist), sinsp_exception); - ASSERT_THROW(field_exists(evt, "k8sres.ns.labelnotexists", pl_flist), + ASSERT_THROW(field_exists(evt, "k8smeta.ns.labelnotexists", pl_flist), sinsp_exception); } @@ -318,114 +318,114 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // K8S_POD_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.name", pl_flist), + 
ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.name", pl_flist), "metrics-server-85d6fcf458-tqkcv"); // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.id", pl_flist), pod_uid); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); // K8S_POD_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.pod.label[no]", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.label[k8s-app]", pl_flist), + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.label[k8s-app]", pl_flist), "metrics-server"); // K8S_POD_LABELS - ASSERT_TRUE(field_exists(evt, "k8sres.pod.labels", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.labels", pl_flist), + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.labels", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.labels", pl_flist), "(pod-template-hash:85d6fcf458,k8s-app:metrics-server)"); // K8S_POD_IP - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.ip", pl_flist), "10.16.1.2"); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.ip", pl_flist), "10.16.1.2"); // K8S_NS_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.name", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.name", pl_flist), "kube-system"); // K8S_NS_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.id", pl_flist), "c51d0620-b1e1-449a-a6f2-9f96830831a9"); // K8S_NS_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.ns.label[no]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.label[no]", pl_flist)); ASSERT_EQ(get_field_as_string( - evt, "k8sres.ns.label[kubernetes.io/metadata.name]", + evt, "k8smeta.ns.label[kubernetes.io/metadata.name]", pl_flist), "kube-system"); // K8S_NS_LABELS - ASSERT_TRUE(field_exists(evt, "k8sres.ns.labels", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.labels", pl_flist), + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.labels", pl_flist)); + 
ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.labels", pl_flist), "(kubernetes.io/metadata.name:kube-system)"); // K8S_DEPLOYMENT_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.name", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.name", pl_flist), "metrics-server"); // K8S_DEPLOYMENT_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.id", pl_flist), "e56cf37d-5b8b-4b2d-b7bc-3316a3d72e93"); // K8S_DEPLOYMENT_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.label[no]", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.label[k8s-app]", + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.label[k8s-app]", pl_flist), "metrics-server"); // K8S_DEPLOYMENT_LABELS - ASSERT_TRUE(field_exists(evt, "k8sres.deployment.labels", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.labels", pl_flist), + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.labels", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.labels", pl_flist), "(k8s-app:metrics-server)"); // K8S_SVC_NAME // This field is a list so we have this `( )` notation - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.name", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.name", pl_flist), "(metrics-server)"); // K8S_SVC_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), "(b2af0913-1a07-457f-986a-111caa4fb372)"); // K8S_SVC_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.svc.label[no]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.label[no]", pl_flist)); // This field is a list so we have this `( )` notation - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.label[k8s-app]", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.label[k8s-app]", pl_flist), 
"(metrics-server)"); // K8S_SVC_LABELS - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.labels", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.labels", pl_flist), "(k8s-app:metrics-server)"); // K8S_RS_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.name", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.name", pl_flist), "metrics-server-85d6fcf458"); // K8S_RS_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.id", pl_flist), "8be7cb9d-f96a-41b5-8fb0-81fda92a663a"); // K8S_RS_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.rs.label[no]", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.label[pod-template-hash]", + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.label[pod-template-hash]", pl_flist), "85d6fcf458"); // K8S_RS_LABELS - ASSERT_TRUE(field_exists(evt, "k8sres.rs.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.labels", pl_flist)); // This field is a list so we have this `( )` notation - ASSERT_EQ(get_field_as_string(evt, "k8sres.rs.labels", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.labels", pl_flist), "(pod-template-hash:85d6fcf458,k8s-app:metrics-server)"); // K8S_RC_NAME - ASSERT_TRUE(field_exists(evt, "k8sres.rc.name", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.name", pl_flist)); // K8S_RC_ID - ASSERT_TRUE(field_exists(evt, "k8sres.rc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.id", pl_flist)); // K8S_RC_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.rc.label[no]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.label[no]", pl_flist)); // K8S_RC_LABELS - ASSERT_TRUE(field_exists(evt, "k8sres.rc.labels", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.labels", pl_flist)); m_inspector.close(); } @@ -451,28 +451,28 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_with_2_services) 
GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.id", pl_flist), pod_uid); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); // K8S_SVC_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.name", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.name", pl_flist), "(nginx-service,nginx-service-second-service)"); // K8S_SVC_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), "(f0fea0cd-24cd-439f-bd51-e7a9100fed40,9e840fbe-93e4-412c-aa23-" "fbe6d03efd08)"); // K8S_SVC_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.svc.label[no]", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.label[no]", pl_flist)); // Both services have the `app` label - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.label[app]", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.label[app]", pl_flist), "(custom,custom-2)"); // Only one of the 2 services has the value for the label - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.label[service]", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.label[service]", pl_flist), "(service1)"); // K8S_SVC_LABELS - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.labels", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.labels", pl_flist), "(service:service1,app:custom,app:custom-2)"); m_inspector.close(); @@ -498,22 +498,22 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_with_repliacation_controller) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.id", pl_flist), pod_uid); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); // K8S_RC_NAME - ASSERT_EQ(get_field_as_string(evt, "k8sres.rc.name", pl_flist), "nginx"); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.name", pl_flist), "nginx"); // K8S_RC_ID - ASSERT_EQ(get_field_as_string(evt, "k8sres.rc.id", pl_flist), + 
ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.id", pl_flist), "f2e2a261-ba86-4fa6-9493-e5260a106126"); // K8S_RC_LABEL - ASSERT_TRUE(field_exists(evt, "k8sres.rc.label[no]", pl_flist)); - ASSERT_EQ(get_field_as_string(evt, "k8sres.rc.label[app]", pl_flist), + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.label[no]", pl_flist)); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.label[app]", pl_flist), "nginx"); // K8S_RC_LABELS - ASSERT_EQ(get_field_as_string(evt, "k8sres.rc.labels", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.labels", pl_flist), "(app:nginx)"); m_inspector.close(); @@ -549,25 +549,25 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_namespace_and_deployment) // extract their fields // The pod should be still here - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.id", pl_flist), pod_uid); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); // The namespace name is extracted from the pod meta so we still have it - ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.name", pl_flist), "default"); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.name", pl_flist), "default"); // The namespace uid is available because it is obtained from the pod refs - ASSERT_EQ(get_field_as_string(evt, "k8sres.ns.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.id", pl_flist), "f7ju8b13-df0c-43bd-8ded-973f4ede66c6"); // The deployment uid is available because it is obtained from the pod refs - ASSERT_EQ(get_field_as_string(evt, "k8sres.deployment.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.id", pl_flist), "920r1601-61b6-4d46-8916-db9f36414722"); // These resources are removed so we shouldn't have fields ASSERT_FALSE(field_has_value( - evt, "k8sres.ns.label[kubernetes.io/metadata.name]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); + evt, "k8smeta.ns.label[kubernetes.io/metadata.name]", 
pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); ASSERT_FALSE( - field_has_value(evt, "k8sres.deployment.label[k8s-app]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); + field_has_value(evt, "k8smeta.deployment.label[k8s-app]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); m_inspector.close(); } @@ -592,32 +592,32 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_a_pod) std::string pod_uid = "0cc0927d-1d9f-4798-926b-451364a4fgjs"; GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); - ASSERT_FALSE(field_has_value(evt, "k8sres.pod.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.pod.id", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.pod.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.pod.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.pod.ip", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.ns.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.ns.id", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.ns.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.ip", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.label[exists]", pl_flist)); + 
ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.id", pl_flist)); ASSERT_FALSE( - field_has_value(evt, "k8sres.deployment.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.svc.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.svc.id", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.svc.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.svc.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rs.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rs.id", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rs.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rs.labels", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rc.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rc.id", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rc.label[exists]", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8sres.rc.labels", pl_flist)); + field_has_value(evt, "k8smeta.deployment.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.name", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.labels", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.name", pl_flist)); + 
ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.label[exists]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.labels", pl_flist)); // Now we use a pod that still exists and is associated with the same // Namespace and Deployment We want to check that the Namespace and the @@ -625,16 +625,16 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_a_pod) pod_uid = "5eaeeca9-2277-460b-a4bf-5a0783f6d49f"; GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); - ASSERT_TRUE(field_has_value(evt, "k8sres.pod.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.pod.id", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.pod.labels", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.pod.ip", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.ns.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.ns.id", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.ns.labels", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.id", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8sres.deployment.labels", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.labels", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.ip", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); m_inspector.close(); } @@ -659,7 +659,7 @@ TEST_F(sinsp_with_test_input, plugin_k8s_update_a_pod) 
GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // After 2 "Updated" events the pod has 2 services - ASSERT_EQ(get_field_as_string(evt, "k8sres.svc.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), "(f0fea0cd-24cd-439f-bd51-e7a9100fed40,9e840fbe-93e4-412c-aa23-" "fbe6d03efd08)"); @@ -668,7 +668,7 @@ TEST_F(sinsp_with_test_input, plugin_k8s_update_a_pod) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // After 2 "Updated" events the pod has 2 services - ASSERT_EQ(get_field_as_string(evt, "k8sres.pod.ip", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.ip", pl_flist), "10.16.1.20"); m_inspector.close(); } From 1cc715990c931a54d90168f92997c97f8da2ad39 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Fri, 15 Dec 2023 16:51:26 +0100 Subject: [PATCH 5/6] cleanup: rename `.id` into `.uid` Signed-off-by: Andrea Terzolo Co-authored-by: Melissa Kilby --- plugins/k8smeta/README.md | 54 ++++++++------ plugins/k8smeta/src/plugin.cpp | 34 ++++----- plugins/k8smeta/src/plugin.h | 12 +-- plugins/k8smeta/test/rules/example_rule.yaml | 4 +- plugins/k8smeta/test/src/check_events.cpp | 78 ++++++++++---------- 5 files changed, 97 insertions(+), 85 deletions(-) diff --git a/plugins/k8smeta/README.md b/plugins/k8smeta/README.md index e1bd6f13..3ec4f155 100644 --- a/plugins/k8smeta/README.md +++ b/plugins/k8smeta/README.md @@ -19,33 +19,33 @@ The `k8smeta` plugin implements these capabilities: ### Supported Fields -| NAME | TYPE | ARG | DESCRIPTION | -|----------------------------|-----------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `k8smeta.pod.name` | `string` | None | Kubernetes pod name. | -| `k8smeta.pod.id` | `string` | None | Kubernetes pod ID. 
| +| NAME | TYPE | ARG | DESCRIPTION | +|-----------------------------|-----------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `k8smeta.pod.name` | `string` | None | Kubernetes pod name. | +| `k8smeta.pod.uid` | `string` | None | Kubernetes pod UID. | | `k8smeta.pod.label` | `string` | Key, Required | Kubernetes pod label. E.g. 'k8smeta.pod.label[foo]'. | -| `k8smeta.pod.labels` | `string (list)` | None | Kubernetes pod comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8smeta.pod.ip` | `string` | None | Kubernetes pod ip | -| `k8smeta.ns.name` | `string` | None | Kubernetes namespace name. | -| `k8smeta.ns.id` | `string` | None | Kubernetes namespace ID. | +| `k8smeta.pod.labels` | `string (list)` | None | Kubernetes pod comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.pod.ip` | `string` | None | Kubernetes pod ip | +| `k8smeta.ns.name` | `string` | None | Kubernetes namespace name. | +| `k8smeta.ns.uid` | `string` | None | Kubernetes namespace UID. | | `k8smeta.ns.label` | `string` | Key, Required | Kubernetes namespace label. E.g. 'k8smeta.ns.label[foo]'. | -| `k8smeta.ns.labels` | `string (list)` | None | Kubernetes namespace comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8smeta.deployment.name` | `string` | None | Kubernetes deployment name. | -| `k8smeta.deployment.id` | `string` | None | Kubernetes deployment ID. | +| `k8smeta.ns.labels` | `string (list)` | None | Kubernetes namespace comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.deployment.name` | `string` | None | Kubernetes deployment name. | +| `k8smeta.deployment.uid` | `string` | None | Kubernetes deployment UID. 
| | `k8smeta.deployment.label` | `string` | Key, Required | Kubernetes deployment label. E.g. 'k8smeta.rs.label[foo]'. | -| `k8smeta.deployment.labels` | `string (list)` | None | Kubernetes deployment comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8smeta.svc.name` | `string (list)` | None | Kubernetes services name. Return a list with all the names of the services associated with the current pod. E.g. '(service1,service2)' | -| `k8smeta.svc.id` | `string (list)` | None | Kubernetes services ID. Return a list with all the IDs of the services associated with the current pod. E.g. '(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-25307c5007e8)' | +| `k8smeta.deployment.labels` | `string (list)` | None | Kubernetes deployment comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.svc.name` | `string (list)` | None | Kubernetes services name. Return a list with all the names of the services associated with the current pod. E.g. '(service1,service2)' | +| `k8smeta.svc.uid` | `string (list)` | None | Kubernetes services UID. Return a list with all the UIDs of the services associated with the current pod. E.g. '(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-25307c5007e8)' | | `k8smeta.svc.label` | `string (list)` | Key, Required | Kubernetes services label. If the services associated with the current pod have a label with this name, return the list of label's values. E.g. if the current pod has 2 services associated and both have the 'foo' label, 'k8smeta.svc.label[foo]' will return '(service1-label-value,service2-label-value) | -| `k8smeta.svc.labels` | `string (list)` | None | Kubernetes services labels. Return a list with all the comma-separated key/value labels of the services associated with the current pod. E.g. '(foo1:bar1,foo2:bar2)' | -| `k8smeta.rs.name` | `string` | None | Kubernetes replica set name. | -| `k8smeta.rs.id` | `string` | None | Kubernetes replica set ID. 
| +| `k8smeta.svc.labels` | `string (list)` | None | Kubernetes services labels. Return a list with all the comma-separated key/value labels of the services associated with the current pod. E.g. '(foo1:bar1,foo2:bar2)' | +| `k8smeta.rs.name` | `string` | None | Kubernetes replica set name. | +| `k8smeta.rs.uid` | `string` | None | Kubernetes replica set UID. | | `k8smeta.rs.label` | `string` | Key, Required | Kubernetes replica set label. E.g. 'k8smeta.rs.label[foo]'. | -| `k8smeta.rs.labels` | `string (list)` | None | Kubernetes replica set comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | -| `k8smeta.rc.name` | `string` | None | Kubernetes replication controller name. | -| `k8smeta.rc.id` | `string` | None | Kubernetes replication controller ID. | +| `k8smeta.rs.labels` | `string (list)` | None | Kubernetes replica set comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.rc.name` | `string` | None | Kubernetes replication controller name. | +| `k8smeta.rc.uid` | `string` | None | Kubernetes replication controller UID. | | `k8smeta.rc.label` | `string` | Key, Required | Kubernetes replication controller label. E.g. 'k8smeta.rc.label[foo]'. | -| `k8smeta.rc.labels` | `string (list)` | None | Kubernetes replication controller comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | +| `k8smeta.rc.labels` | `string (list)` | None | Kubernetes replication controller comma-separated key/value labels. E.g. '(foo1:bar1,foo2:bar2)'. | ## Usage @@ -77,3 +77,15 @@ The plugin doesn't have open params ### Rule Example To see how to use the plugin fields in a Falco rule check the example rule `/k8smeta/test/rules/example_rule.yaml`. + +### Build the plugin on a fresh Ubuntu 22.04 machine + +```bash +sudo apt update -y +sudo apt install -y cmake build-essential autoconf libtool pkg-config +git clone https://github.com/falcosecurity/plugins.git +cd plugins/k8smeta +mkdir build && cd build +cmake .. 
+make k8smeta -j10 +``` diff --git a/plugins/k8smeta/src/plugin.cpp b/plugins/k8smeta/src/plugin.cpp index 79bb1824..592ccf48 100644 --- a/plugins/k8smeta/src/plugin.cpp +++ b/plugins/k8smeta/src/plugin.cpp @@ -337,7 +337,7 @@ std::vector my_plugin::get_fields() const falcosecurity::field_info fields[] = { {ft::FTYPE_STRING, "k8smeta.pod.name", "Pod Name", "Kubernetes pod name."}, - {ft::FTYPE_STRING, "k8smeta.pod.id", "Pod ID", "Kubernetes pod ID."}, + {ft::FTYPE_STRING, "k8smeta.pod.uid", "Pod UID", "Kubernetes pod UID."}, {ft::FTYPE_STRING, "k8smeta.pod.label", "Pod Label", @@ -351,8 +351,8 @@ std::vector my_plugin::get_fields() {ft::FTYPE_STRING, "k8smeta.ns.name", "Namespace Name", "Kubernetes namespace name."}, - {ft::FTYPE_STRING, "k8smeta.ns.id", "Namespace ID", - "Kubernetes namespace ID."}, + {ft::FTYPE_STRING, "k8smeta.ns.uid", "Namespace UID", + "Kubernetes namespace UID."}, {ft::FTYPE_STRING, "k8smeta.ns.label", "Namespace Label", @@ -365,8 +365,8 @@ std::vector my_plugin::get_fields() {ft::FTYPE_STRING, "k8smeta.deployment.name", "Deployment Name", "Kubernetes deployment name."}, - {ft::FTYPE_STRING, "k8smeta.deployment.id", "Deployment ID", - "Kubernetes deployment ID."}, + {ft::FTYPE_STRING, "k8smeta.deployment.uid", "Deployment UID", + "Kubernetes deployment UID."}, {ft::FTYPE_STRING, "k8smeta.deployment.label", "Deployment Label", @@ -382,8 +382,8 @@ std::vector my_plugin::get_fields() "the services associated with the " "current pod. E.g. '(service1,service2)'", falcosecurity::field_arg(), true}, - {ft::FTYPE_STRING, "k8smeta.svc.id", "Services ID", - "Kubernetes services ID. Return a list with all the IDs of the " + {ft::FTYPE_STRING, "k8smeta.svc.uid", "Services UID", + "Kubernetes services UID. Return a list with all the UIDs of the " "services associated with the " "current pod. E.g. 
" "'(88279776-941c-491e-8da1-95ef30f50fe8,149e72f4-a570-4282-bfa0-" @@ -409,8 +409,8 @@ std::vector my_plugin::get_fields() {ft::FTYPE_STRING, "k8smeta.rs.name", "Replica Set Name", "Kubernetes replica set name."}, - {ft::FTYPE_STRING, "k8smeta.rs.id", "Replica Set ID", - "Kubernetes replica set ID."}, + {ft::FTYPE_STRING, "k8smeta.rs.uid", "Replica Set UID", + "Kubernetes replica set UID."}, {ft::FTYPE_STRING, "k8smeta.rs.label", "Replica Set Label", @@ -423,8 +423,8 @@ std::vector my_plugin::get_fields() {ft::FTYPE_STRING, "k8smeta.rc.name", "Replication Controller Name", "Kubernetes replication controller name."}, - {ft::FTYPE_STRING, "k8smeta.rc.id", "Replication Controller ID", - "Kubernetes replication controller ID."}, + {ft::FTYPE_STRING, "k8smeta.rc.uid", "Replication Controller UID", + "Kubernetes replication controller UID."}, {ft::FTYPE_STRING, "k8smeta.rc.label", "Replication Controller Label", @@ -943,7 +943,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) { case K8S_POD_NAME: return extract_name_from_meta(pod_layout.meta, req); - case K8S_POD_ID: + case K8S_POD_UID: req.set_value(pod_uid, true); break; case K8S_POD_LABEL: @@ -982,7 +982,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) req.set_value(pod_namespace_name, true); break; } - case K8S_NS_ID: + case K8S_NS_UID: return extract_uid_from_refs(pod_layout.refs, NS, req); case K8S_NS_LABEL: return extract_label_value_from_refs(pod_layout.refs, NS, req); @@ -993,7 +993,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) // another under some circumstances. 
case K8S_DEPLOYMENT_NAME: return extract_name_from_refs(pod_layout.refs, DEPLOYMENT, req); - case K8S_DEPLOYMENT_ID: + case K8S_DEPLOYMENT_UID: return extract_uid_from_refs(pod_layout.refs, DEPLOYMENT, req); case K8S_DEPLOYMENT_LABEL: return extract_label_value_from_refs(pod_layout.refs, DEPLOYMENT, req); @@ -1001,7 +1001,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) return extract_labels_from_refs(pod_layout.refs, DEPLOYMENT, req); case K8S_SVC_NAME: return extract_name_array_from_refs(pod_layout.refs, SVC, req); - case K8S_SVC_ID: + case K8S_SVC_UID: return extract_uid_array_from_refs(pod_layout.refs, SVC, req); case K8S_SVC_LABEL: return extract_label_value_array_from_refs(pod_layout.refs, SVC, req); @@ -1012,7 +1012,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) // another under some circumstances. case K8S_RS_NAME: return extract_name_from_refs(pod_layout.refs, RS, req); - case K8S_RS_ID: + case K8S_RS_UID: return extract_uid_from_refs(pod_layout.refs, RS, req); case K8S_RS_LABEL: return extract_label_value_from_refs(pod_layout.refs, RS, req); @@ -1023,7 +1023,7 @@ bool my_plugin::extract(const falcosecurity::extract_fields_input& in) // replicationController to another under some circumstances. 
case K8S_RC_NAME: return extract_name_from_refs(pod_layout.refs, RC, req); - case K8S_RC_ID: + case K8S_RC_UID: return extract_uid_from_refs(pod_layout.refs, RC, req); case K8S_RC_LABEL: return extract_label_value_from_refs(pod_layout.refs, RC, req); diff --git a/plugins/k8smeta/src/plugin.h b/plugins/k8smeta/src/plugin.h index a998910c..dc51d9b8 100644 --- a/plugins/k8smeta/src/plugin.h +++ b/plugins/k8smeta/src/plugin.h @@ -60,28 +60,28 @@ class my_plugin enum K8sFields { K8S_POD_NAME, - K8S_POD_ID, + K8S_POD_UID, K8S_POD_LABEL, K8S_POD_LABELS, K8S_POD_IP, K8S_NS_NAME, - K8S_NS_ID, + K8S_NS_UID, K8S_NS_LABEL, K8S_NS_LABELS, K8S_DEPLOYMENT_NAME, - K8S_DEPLOYMENT_ID, + K8S_DEPLOYMENT_UID, K8S_DEPLOYMENT_LABEL, K8S_DEPLOYMENT_LABELS, K8S_SVC_NAME, - K8S_SVC_ID, + K8S_SVC_UID, K8S_SVC_LABEL, K8S_SVC_LABELS, K8S_RS_NAME, - K8S_RS_ID, + K8S_RS_UID, K8S_RS_LABEL, K8S_RS_LABELS, K8S_RC_NAME, - K8S_RC_ID, + K8S_RC_UID, K8S_RC_LABEL, K8S_RC_LABELS, K8S_FIELD_MAX diff --git a/plugins/k8smeta/test/rules/example_rule.yaml b/plugins/k8smeta/test/rules/example_rule.yaml index 2329d0b6..b0bdad76 100644 --- a/plugins/k8smeta/test/rules/example_rule.yaml +++ b/plugins/k8smeta/test/rules/example_rule.yaml @@ -1,6 +1,6 @@ - rule: Example rule for k8s plugin desc: Detect execve events into pods # we want to catch all execve events into a pod - condition: evt.type = execve and k8smeta.pod.id != "" - output: -> Triggered (pod_name=%k8smeta.pod.name pod_id=%k8smeta.pod.id pod_ip=%k8smeta.pod.ip pod_namespace_name=%k8smeta.ns.name pod_deployment_name=%k8smeta.deployment.name pod_rs_name=%k8smeta.rs.name pod_services_names=%k8smeta.svc.name) + condition: evt.type = execve and k8smeta.pod.uid != "" + output: -> Triggered (pod_name=%k8smeta.pod.name pod_uid=%k8smeta.pod.uid pod_ip=%k8smeta.pod.ip pod_namespace_name=%k8smeta.ns.name pod_deployment_name=%k8smeta.deployment.name pod_rs_name=%k8smeta.rs.name pod_services_names=%k8smeta.svc.name) priority: WARNING diff --git
a/plugins/k8smeta/test/src/check_events.cpp b/plugins/k8smeta/test/src/check_events.cpp index 6e4847f1..90b4983c 100644 --- a/plugins/k8smeta/test/src/check_events.cpp +++ b/plugins/k8smeta/test/src/check_events.cpp @@ -229,28 +229,28 @@ TEST_F(sinsp_with_test_input, plugin_k8s_fields_existance) // Obtain an event to assert the filterchecks presence against it. auto evt = generate_random_event(INIT_TID); ASSERT_TRUE(field_exists(evt, "k8smeta.pod.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.pod.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.pod.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.pod.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.pod.ip", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.ns.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.ns.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.ns.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.ns.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.ns.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.svc.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.svc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.svc.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.svc.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.svc.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.rs.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.rs.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rs.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, 
"k8smeta.rs.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.rs.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.rc.name", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.rc.id", pl_flist)); + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.uid", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.rc.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.rc.labels", pl_flist)); @@ -321,8 +321,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.name", pl_flist), "metrics-server-85d6fcf458-tqkcv"); - // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); + // K8S_POD_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.uid", pl_flist), pod_uid); // K8S_POD_LABEL ASSERT_TRUE(field_exists(evt, "k8smeta.pod.label[no]", pl_flist)); @@ -341,8 +341,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.name", pl_flist), "kube-system"); - // K8S_NS_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.id", pl_flist), + // K8S_NS_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.uid", pl_flist), "c51d0620-b1e1-449a-a6f2-9f96830831a9"); // K8S_NS_LABEL @@ -361,8 +361,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.name", pl_flist), "metrics-server"); - // K8S_DEPLOYMENT_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.id", pl_flist), + // K8S_DEPLOYMENT_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.uid", pl_flist), "e56cf37d-5b8b-4b2d-b7bc-3316a3d72e93"); // K8S_DEPLOYMENT_LABEL @@ -381,8 +381,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.name", pl_flist), "(metrics-server)"); - // K8S_SVC_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), + // K8S_SVC_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.uid", pl_flist), 
"(b2af0913-1a07-457f-986a-111caa4fb372)"); // K8S_SVC_LABEL @@ -399,8 +399,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.name", pl_flist), "metrics-server-85d6fcf458"); - // K8S_RS_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.id", pl_flist), + // K8S_RS_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rs.uid", pl_flist), "8be7cb9d-f96a-41b5-8fb0-81fda92a663a"); // K8S_RS_LABEL @@ -418,8 +418,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) // K8S_RC_NAME ASSERT_TRUE(field_exists(evt, "k8smeta.rc.name", pl_flist)); - // K8S_RC_ID - ASSERT_TRUE(field_exists(evt, "k8smeta.rc.id", pl_flist)); + // K8S_RC_UID + ASSERT_TRUE(field_exists(evt, "k8smeta.rc.uid", pl_flist)); // K8S_RC_LABEL ASSERT_TRUE(field_exists(evt, "k8smeta.rc.label[no]", pl_flist)); @@ -450,15 +450,15 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_with_2_services) std::string pod_uid = "0cc53e7d-1d9f-4798-926b-451364a4ec8e"; GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); - // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); + // K8S_POD_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.uid", pl_flist), pod_uid); // K8S_SVC_NAME ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.name", pl_flist), "(nginx-service,nginx-service-second-service)"); - // K8S_SVC_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), + // K8S_SVC_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.uid", pl_flist), "(f0fea0cd-24cd-439f-bd51-e7a9100fed40,9e840fbe-93e4-412c-aa23-" "fbe6d03efd08)"); @@ -497,14 +497,14 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_with_repliacation_controller) std::string pod_uid = "00e704ac-77d1-4aac-80af-31233b277889"; GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); - // K8S_POD_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); + // K8S_POD_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.uid", pl_flist), pod_uid); // K8S_RC_NAME 
ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.name", pl_flist), "nginx"); - // K8S_RC_ID - ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.id", pl_flist), + // K8S_RC_UID + ASSERT_EQ(get_field_as_string(evt, "k8smeta.rc.uid", pl_flist), "f2e2a261-ba86-4fa6-9493-e5260a106126"); // K8S_RC_LABEL @@ -549,15 +549,15 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_namespace_and_deployment) // extract their fields // The pod should be still here - ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.id", pl_flist), pod_uid); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.uid", pl_flist), pod_uid); // The namespace name is extracted from the pod meta so we still have it ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.name", pl_flist), "default"); // The namespace uid is available because it is obtained from the pod refs - ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.uid", pl_flist), "f7ju8b13-df0c-43bd-8ded-973f4ede66c6"); // The deployment uid is available because it is obtained from the pod refs - ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.deployment.uid", pl_flist), "920r1601-61b6-4d46-8916-db9f36414722"); // These resources are removed so we shouldn't have fields @@ -593,29 +593,29 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_a_pod) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.uid", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.pod.ip", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.id", pl_flist)); + 
ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.uid", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.uid", pl_flist)); ASSERT_FALSE( field_has_value(evt, "k8smeta.deployment.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.uid", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.svc.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.uid", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rs.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.name", pl_flist)); - ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.id", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.uid", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.label[exists]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.rc.labels", pl_flist)); @@ -626,14 +626,14 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_a_pod) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.uid", pl_flist)); ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.labels", pl_flist)); 
ASSERT_TRUE(field_has_value(evt, "k8smeta.pod.ip", pl_flist)); ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.uid", pl_flist)); ASSERT_TRUE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); - ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.id", pl_flist)); + ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.uid", pl_flist)); ASSERT_TRUE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); m_inspector.close(); @@ -659,7 +659,7 @@ TEST_F(sinsp_with_test_input, plugin_k8s_update_a_pod) GENERATE_EXECVE_EVENT_FOR_INIT(pod_uid); // After 2 "Updated" events the pod has 2 services - ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.id", pl_flist), + ASSERT_EQ(get_field_as_string(evt, "k8smeta.svc.uid", pl_flist), "(f0fea0cd-24cd-439f-bd51-e7a9100fed40,9e840fbe-93e4-412c-aa23-" "fbe6d03efd08)"); From c1992723881d75b65e92227b53b8397f186562ef Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Fri, 15 Dec 2023 18:02:44 +0100 Subject: [PATCH 6/6] cleanup: fix clang-format issues Signed-off-by: Andrea Terzolo --- plugins/k8smeta/src/plugin.cpp | 3 ++- plugins/k8smeta/test/src/check_events.cpp | 10 ++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/plugins/k8smeta/src/plugin.cpp b/plugins/k8smeta/src/plugin.cpp index 592ccf48..33f3196b 100644 --- a/plugins/k8smeta/src/plugin.cpp +++ b/plugins/k8smeta/src/plugin.cpp @@ -337,7 +337,8 @@ std::vector my_plugin::get_fields() const falcosecurity::field_info fields[] = { {ft::FTYPE_STRING, "k8smeta.pod.name", "Pod Name", "Kubernetes pod name."}, - {ft::FTYPE_STRING, "k8smeta.pod.uid", "Pod UID", "Kubernetes pod UID."}, + {ft::FTYPE_STRING, "k8smeta.pod.uid", "Pod UID", + "Kubernetes pod UID."}, {ft::FTYPE_STRING, "k8smeta.pod.label", "Pod Label", diff --git 
a/plugins/k8smeta/test/src/check_events.cpp b/plugins/k8smeta/test/src/check_events.cpp index 90b4983c..b154dcc4 100644 --- a/plugins/k8smeta/test/src/check_events.cpp +++ b/plugins/k8smeta/test/src/check_events.cpp @@ -239,7 +239,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_fields_existance) ASSERT_TRUE(field_exists(evt, "k8smeta.ns.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.name", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.uid", pl_flist)); - ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.label[exists]", pl_flist)); + ASSERT_TRUE( + field_exists(evt, "k8smeta.deployment.label[exists]", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.deployment.labels", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.svc.name", pl_flist)); ASSERT_TRUE(field_exists(evt, "k8smeta.svc.uid", pl_flist)); @@ -335,7 +336,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_pod_refs) "(pod-template-hash:85d6fcf458,k8s-app:metrics-server)"); // K8S_POD_IP - ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.ip", pl_flist), "10.16.1.2"); + ASSERT_EQ(get_field_as_string(evt, "k8smeta.pod.ip", pl_flist), + "10.16.1.2"); // K8S_NS_NAME ASSERT_EQ(get_field_as_string(evt, "k8smeta.ns.name", pl_flist), @@ -565,8 +567,8 @@ TEST_F(sinsp_with_test_input, plugin_k8s_delete_namespace_and_deployment) evt, "k8smeta.ns.label[kubernetes.io/metadata.name]", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.ns.labels", pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.name", pl_flist)); - ASSERT_FALSE( - field_has_value(evt, "k8smeta.deployment.label[k8s-app]", pl_flist)); + ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.label[k8s-app]", + pl_flist)); ASSERT_FALSE(field_has_value(evt, "k8smeta.deployment.labels", pl_flist)); m_inspector.close();