From c88abff3e8ee99493652fbd22d9636bf0fd24456 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 10 May 2024 17:50:54 +0100 Subject: [PATCH 1/6] cmd/k8s-operator,cmd/containerboot,ipn,k8s-operator: turn off stateful filter for egress proxies. (#12088) Turn off stateful filtering for egress proxies to allow cluster traffic to be forwarded to tailnet. Allow configuring stateful filter via tailscaled config file. Deprecate the EXPERIMENTAL_TS_CONFIGFILE_PATH env var and introduce a new TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR env var that can be used to provide containerboot with a directory that should contain one or more tailscaled config files named cap-<capability version>.hujson. Containerboot will pick the one with the newest capability version that is not newer than its current capability version. Proxies with this change will not work with older Tailscale Kubernetes operator versions - users must ensure that the deployed operator is the same version as or newer than the proxies (up to a four-version skew). Updates tailscale/tailscale#12061 Co-authored-by: Maisem Ali Signed-off-by: Irbe Krumina (cherry picked from commit 131ae6e394709778d7903c16ac5d5ef66a086f2e) --- cmd/containerboot/main.go | 58 ++++++++- cmd/containerboot/main_test.go | 10 +- cmd/k8s-operator/operator_test.go | 4 +- cmd/k8s-operator/sts.go | 188 +++++++++++++++++++---------- cmd/k8s-operator/testutils_test.go | 38 +++--- ipn/conf.go | 9 +- k8s-operator/tsdns.go | 23 ---- k8s-operator/utils.go | 49 ++++++++ 8 files changed, 256 insertions(+), 123 deletions(-) delete mode 100644 k8s-operator/tsdns.go create mode 100644 k8s-operator/utils.go diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 80033e138ab38..abeb85d0974a7 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -52,8 +52,10 @@ // ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN. // It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes, // and will be re-applied when it changes. -// - EXPERIMENTAL_TS_CONFIGFILE_PATH: if specified, a path to tailscaled -// config. If this is set, TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, +// - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a +// directory that contains a tailscaled config file. The config file needs to be +// named cap-<capability version>.hujson. If this is set, TS_HOSTNAME, +// TS_EXTRA_ARGS, TS_AUTHKEY, // TS_ROUTES, TS_ACCEPT_DNS env vars must not be set. If this is set, // containerboot only runs `tailscaled --config <path>` // and not `tailscale up` or `tailscale set`. 
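As an aside on the contract above: the operator writes one config file per supported capability version, and containerboot picks the newest one it understands, with pre-cap-95 proxies falling back to the legacy tailscaled key. A minimal standalone sketch of that rule (illustrative only - nameForCap and pickConfig are hypothetical stand-ins mirroring TailscaledConfigFileNameForCap and the selection in tailscaledConfigFilePath, both introduced later in this patch):

package main

import (
	"fmt"
	"sort"
)

// nameForCap mirrors the contract of TailscaledConfigFileNameForCap: capability
// versions from 95 onwards get a versioned file name; older versions keep the
// legacy "tailscaled" key.
func nameForCap(cap int) string {
	if cap < 95 {
		return "tailscaled"
	}
	return fmt.Sprintf("cap-%d.hujson", cap)
}

// pickConfig applies the selection rule: of the capability versions on offer,
// take the highest one that is not newer than curCap, or "" if none fits.
func pickConfig(curCap int, offered []int) string {
	sort.Sort(sort.Reverse(sort.IntSlice(offered)))
	for _, v := range offered {
		if v <= curCap {
			return nameForCap(v)
		}
	}
	return ""
}

func main() {
	fmt.Println(pickConfig(95, []int{94, 95})) // cap-95.hujson
	fmt.Println(pickConfig(94, []int{94, 95})) // tailscaled (legacy key for an older proxy)
}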
@@ -92,6 +94,7 @@ import ( "os" "os/exec" "os/signal" + "path" "path/filepath" "reflect" "slices" @@ -107,6 +110,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/conffile" + kubeutils "tailscale.com/k8s-operator" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/ptr" @@ -145,7 +149,7 @@ func main() { Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), AuthOnce: defaultBool("TS_AUTH_ONCE", false), Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), - TailscaledConfigFilePath: defaultEnv("EXPERIMENTAL_TS_CONFIGFILE_PATH", ""), + TailscaledConfigFilePath: tailscaledConfigFilePath(), AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), PodIP: defaultEnv("POD_IP", ""), } @@ -1097,6 +1101,13 @@ type settings struct { func (s *settings) validate() error { if s.TailscaledConfigFilePath != "" { + dir, file := path.Split(s.TailscaledConfigFilePath) + if _, err := os.Stat(dir); err != nil { + return fmt.Errorf("error validating whether directory with tailscaled config file %s exists: %w", dir, err) + } + if _, err := os.Stat(s.TailscaledConfigFilePath); err != nil { + return fmt.Errorf("error validating whether tailscaled config directory %q contains tailscaled config for current capability version %q: %w. If this is a Tailscale Kubernetes operator proxy, please ensure that the version of the operator is not older than the version of the proxy", dir, file, err) + } if _, err := conffile.Load(s.TailscaledConfigFilePath); err != nil { return fmt.Errorf("error validating tailscaled configfile contents: %w", err) } @@ -1120,7 +1131,7 @@ func (s *settings) validate() error { return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") } if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") { - return errors.New("EXPERIMENTAL_TS_CONFIGFILE_PATH cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") + return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") } if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") @@ -1252,3 +1263,42 @@ func isTwoStepConfigAlwaysAuth(cfg *settings) bool { func isOneStepConfig(cfg *settings) bool { return cfg.TailscaledConfigFilePath != "" } + +// tailscaledConfigFilePath returns the path to the tailscaled config file that +// should be used for the current capability version. It is determined by the +// TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR environment variable and looks for a +// file named cap-<capability version>.hujson in the directory. It searches for +// the highest capability version that is less than or equal to the current +// capability version. +func tailscaledConfigFilePath() string { + dir := os.Getenv("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR") + if dir == "" { + return "" + } + fe, err := os.ReadDir(dir) + if err != nil { + log.Fatalf("error reading tailscaled config directory %q: %v", dir, err) + } + maxCompatVer := tailcfg.CapabilityVersion(-1) + for _, e := range fe { + // We don't check whether the entry is a file, as in most cases this will + // come from a mounted kube Secret, where the directory contents + // will be various symlinks. 
+ if e.Type().IsDir() { + continue + } + cv, err := kubeutils.CapVerFromFileName(e.Name()) + if err != nil { + log.Printf("skipping file %q in tailscaled config directory %q: %v", e.Name(), dir, err) + continue + } + if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion { + maxCompatVer = cv + } + } + if maxCompatVer == -1 { + log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion) + } + log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion) + return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer)) +} diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 1bc423fdc244c..e0ddd62bfc8bb 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -65,7 +65,7 @@ func TestContainerBoot(t *testing.T) { "dev/net", "proc/sys/net/ipv4", "proc/sys/net/ipv6/conf/all", - "etc", + "etc/tailscaled", } for _, path := range dirs { if err := os.MkdirAll(filepath.Join(d, path), 0700); err != nil { @@ -80,7 +80,7 @@ func TestContainerBoot(t *testing.T) { "dev/net/tun": []byte(""), "proc/sys/net/ipv4/ip_forward": []byte("0"), "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled": tailscaledConfBytes, + "etc/tailscaled/cap-95.hujson": tailscaledConfBytes, } resetFiles := func() { for path, content := range files { @@ -638,14 +638,14 @@ func TestContainerBoot(t *testing.T) { }, }, { - Name: "experimental tailscaled configfile", + Name: "experimental tailscaled config path", Env: map[string]string{ - "EXPERIMENTAL_TS_CONFIGFILE_PATH": filepath.Join(d, "etc/tailscaled"), + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(d, "etc/tailscaled/"), }, Phases: []phase{ { WantCmds: []string{ - "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled", + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", }, }, { Notify: runningNotify, diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 6556b025097d5..b9bd9f7832ac4 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1182,7 +1182,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", - confFileHash: "705e5ffd0bd5326237efdf542c850a65a54101284d5daa30775420fcc64d89c1", + confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e", } expectEqual(t, fc, expectedSTS(t, fc, o), nil) @@ -1192,7 +1192,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { mak.Set(&svc.Annotations, AnnotationHostname, "another-test") }) o.hostname = "another-test" - o.confFileHash = "1a087f887825d2b75d3673c7c2b0131f8ec1f0b1cb761d33e236dd28350dfe23" + o.confFileHash = "5d754cf55463135ee34aa9821f2fd8483b53eb0570c3740c84a086304f427684" expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, o), nil) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index defbbfd2384b4..4d9417ac003ec 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/yaml" "tailscale.com/client/tailscale" "tailscale.com/ipn" + kubeutils "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" 
"tailscale.com/net/netutil" @@ -92,10 +93,6 @@ const ( podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn" // podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents. podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash" - - // tailscaledConfigKey is the name of the key in proxy Secret Data that - // holds the tailscaled config contents. - tailscaledConfigKey = "tailscaled" ) var ( @@ -174,11 +171,11 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga return nil, fmt.Errorf("failed to reconcile headless service: %w", err) } - secretName, tsConfigHash, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash) + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -291,7 +288,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (string, string, error) { +func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ // Hardcode a -0 suffix so that in future, if we support @@ -307,25 +304,23 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) orig = secret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", "", err + return "", "", nil, err } - var ( - authKey, hash string - ) + var authKey string if orig == nil { // Initially it contains only tailscaled config, but when the // proxy starts, it will also store there the state, certs and // ACME account key. sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) if err != nil { - return "", "", err + return "", "", nil, err } if sts != nil { // StatefulSet exists, so we have already created the secret. // If the secret is missing, they should delete the StatefulSet. logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", "", nil + return "", "", nil, nil } // Create API Key secret which is going to be used by the statefulset // to authenticate with Tailscale. 
@@ -336,45 +331,58 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * } authKey, err = a.newAuthKey(ctx, tags) if err != nil { - return "", "", err + return "", "", nil, err } } - confFileBytes, h, err := tailscaledConfig(stsC, authKey, orig) + configs, err := tailscaledConfig(stsC, authKey, orig) if err != nil { - return "", "", fmt.Errorf("error creating tailscaled config: %w", err) + return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + hash, err = tailscaledConfigHash(configs) + if err != nil { + return "", "", nil, fmt.Errorf("error calculating hash of tailscaled configs: %w", err) + } + + latest := tailcfg.CapabilityVersion(-1) + var latestConfig ipn.ConfigVAlpha + for key, val := range configs { + fn := kubeutils.TailscaledConfigFileNameForCap(key) + b, err := json.Marshal(val) + if err != nil { + return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + mak.Set(&secret.StringData, fn, string(b)) + if key > latest { + latest = key + latestConfig = val + } } - hash = h - mak.Set(&secret.StringData, tailscaledConfigKey, string(confFileBytes)) if stsC.ServeConfig != nil { j, err := json.Marshal(stsC.ServeConfig) if err != nil { - return "", "", err + return "", "", nil, err } mak.Set(&secret.StringData, "serve-config", string(j)) } if orig != nil { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(secret.Data[tailscaledConfigKey])) + logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", "", err + return "", "", nil, err } } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes([]byte(secret.StringData[tailscaledConfigKey]))) + logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Create(ctx, secret); err != nil { - return "", "", err + return "", "", nil, err } } - return secret.Name, hash, nil + return secret.Name, hash, configs, nil } // sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted // auth key. 
-func sanitizeConfigBytes(bs []byte) string { - c := &ipn.ConfigVAlpha{} - if err := json.Unmarshal(bs, c); err != nil { - return "invalid config" - } +func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { if c.AuthKey != nil { c.AuthKey = ptr.To("**redacted**") } @@ -437,7 +445,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via ingress is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -493,9 +501,15 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Value: proxySecret, }, corev1.EnvVar{ + // Old tailscaled config key is still used for backwards compatibility. Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled", }, + corev1.EnvVar{ + // New style is in the form of cap-<capability version>.hujson. + Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", + Value: "/etc/tsconfig", + }, ) if sts.ForwardClusterTrafficViaL7IngressProxy { container.Env = append(container.Env, corev1.EnvVar{ @@ -505,18 +519,16 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S } // Configure containerboot to run tailscaled with a config file read from the state Secret. mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash) - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + + configVolume := corev1.Volume{ Name: "tailscaledconfig", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: proxySecret, - Items: []corev1.KeyToPath{{ - Key: tailscaledConfigKey, - Path: tailscaledConfigKey, - }}, }, }, - }) + } + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: "tailscaledconfig", ReadOnly: true, @@ -571,10 +583,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: proxySecret, - Items: []corev1.KeyToPath{{ - Key: "serve-config", - Path: "serve-config", - }}, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, }, }, }) @@ -716,42 +725,82 @@ func enableMetrics(ss *appsv1.StatefulSet, pc *tsapi.ProxyClass) { } } +func readAuthKey(secret *corev1.Secret, key string) (*string, error) { + origConf := &ipn.ConfigVAlpha{} + if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err != nil { + return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err) + } + return origConf.AuthKey, nil +} + // tailscaledConfig takes a proxy config, a newly generated auth key if // generated and a Secret with the previous proxy state and auth key and -// produces returns tailscaled configuration and a hash of that configuration. 
-func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) ([]byte, string, error) { - conf := ipn.ConfigVAlpha{ - Version: "alpha0", - AcceptDNS: "false", - AcceptRoutes: "false", // AcceptRoutes defaults to true - Locked: "false", - Hostname: &stsC.Hostname, +// returns tailscaled configuration and a hash of that configuration. +// +// As of 2024-05-09 it also returns legacy tailscaled config without the +// later-added NoStatefulFiltering field to support proxies older than cap95. +// TODO (irbekrm): remove the legacy config once we no longer need to support +// versions older than cap94, +// https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies
+func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) { + conf := &ipn.ConfigVAlpha{ + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults to true + Locked: "false", + Hostname: &stsC.Hostname, + NoStatefulFiltering: "false", + } + + // For egress proxies only, we need to ensure that stateful filtering is + // not in place so that traffic from cluster can be forwarded via + // Tailscale IPs. + if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" { + conf.NoStatefulFiltering = "true" } if stsC.Connector != nil { routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode) if err != nil { - return nil, "", fmt.Errorf("error calculating routes: %w", err) + return nil, fmt.Errorf("error calculating routes: %w", err) } conf.AdvertiseRoutes = routes } if newAuthkey != "" { conf.AuthKey = &newAuthkey - } else if oldSecret != nil && len(oldSecret.Data[tailscaledConfigKey]) > 0 { // write to StringData, read from Data as StringData is write-only - origConf := &ipn.ConfigVAlpha{} - if err := json.Unmarshal([]byte(oldSecret.Data[tailscaledConfigKey]), origConf); err != nil { - return nil, "", fmt.Errorf("error unmarshaling previous tailscaled config: %w", err) + } else if oldSecret != nil { + var err error + latest := tailcfg.CapabilityVersion(-1) + latestStr := "" + for k, data := range oldSecret.Data { + // write to StringData, read from Data as StringData is write-only + if len(data) == 0 { + continue + } + v, err := kubeutils.CapVerFromFileName(k) + if err != nil { + continue + } + if v > latest { + latestStr = k + latest = v + } + } + // Allow for configs that don't contain an auth key. Perhaps + // users have some mechanisms to delete them. Auth key is + // normally not needed after the initial login. + if latestStr != "" { + conf.AuthKey, err = readAuthKey(oldSecret, latestStr) + if err != nil { + return nil, err + } } - conf.AuthKey = origConf.AuthKey - } - confFileBytes, err := json.Marshal(conf) - if err != nil { - return nil, "", fmt.Errorf("error marshaling tailscaled config : %w", err) - } - hash, err := hashBytes(confFileBytes) - if err != nil { - return nil, "", fmt.Errorf("error calculating config hash: %w", err) } - return confFileBytes, hash, nil + capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + capVerConfigs[95] = *conf + // The legacy config should not contain the NoStatefulFiltering field. 
+ conf.NoStatefulFiltering.Clear() + capVerConfigs[94] = *conf + return capVerConfigs, nil } // ptrObject is a type constraint for pointer types that implement @@ -761,7 +810,9 @@ type ptrObject[T any] interface { *T } -// hashBytes produces a hash for the provided bytes that is the same across +type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha + +// hashBytes produces a hash for the provided tailscaled config that is the same across // different invocations of this code. We do not use the // tailscale.com/deephash.Hash here because that produces a different hash for // the same value in different tailscale builds. The hash we are producing here @@ -770,10 +821,13 @@ type ptrObject[T any] interface { // thing that changed is operator version (the hash is also exposed to users via // an annotation and might be confusing if it changes without the config having // changed). -func hashBytes(b []byte) (string, error) { - h := sha256.New() - _, err := h.Write(b) +func tailscaledConfigHash(c tailscaleConfigs) (string, error) { + b, err := json.Marshal(c) if err != nil { + return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) + } + h := sha256.New() + if _, err = h.Write(b); err != nil { return "", fmt.Errorf("error calculating hash: %w", err) } return fmt.Sprintf("%x", h.Sum(nil)), nil diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index ae9eaebd1564d..684b75433f868 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -67,6 +67,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: opts.secretName}, {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, }, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ @@ -89,12 +90,6 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, - Items: []corev1.KeyToPath{ - { - Key: "tailscaled", - Path: "tailscaled", - }, - }, }, }, }, @@ -144,9 +139,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config", }) - volumes = append(volumes, corev1.Volume{ - Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Path: "serve-config", Key: "serve-config"}}}}, - }) + volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}) tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}) } ss := &appsv1.StatefulSet{ @@ -229,6 +222,7 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: 
"TS_KUBE_SECRET", Value: opts.secretName}, {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, }, ImagePullPolicy: "Always", @@ -243,20 +237,12 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, - Items: []corev1.KeyToPath{ - { - Key: "tailscaled", - Path: "tailscaled", - }, - }, }, }, }, {Name: "serve-config", VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, - Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}, - }, + Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}, } ss := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ @@ -388,7 +374,17 @@ func expectedSecret(t *testing.T, opts configOpts) *corev1.Secret { if err != nil { t.Fatalf("error marshalling tailscaled config") } + if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" { + conf.NoStatefulFiltering = "true" + } else { + conf.NoStatefulFiltering = "false" + } + bn, err := json.Marshal(conf) + if err != nil { + t.Fatalf("error marshalling tailscaled config") + } mak.Set(&s.StringData, "tailscaled", string(b)) + mak.Set(&s.StringData, "cap-95.hujson", string(bn)) labels := map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -463,7 +459,7 @@ func mustUpdateStatus[T any, O ptrObject[T]](t *testing.T, client client.Client, // they are not present in the passed object and use the modify func to remove // them from the cluster object. If no such modifications are needed, you can // pass nil in place of the modify function. -func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modify func(O)) { +func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O, modifier func(O)) { t.Helper() got := O(new(T)) if err := client.Get(context.Background(), types.NamespacedName{ @@ -477,8 +473,8 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want // so just remove it from both got and want. 
got.SetResourceVersion("") want.SetResourceVersion("") - if modify != nil { - modify(got) + if modifier != nil { + modifier(got) } if diff := cmp.Diff(got, want); diff != "" { t.Fatalf("unexpected object (-got +want):\n%s", diff) diff --git a/ipn/conf.go b/ipn/conf.go index ffd4c72693ef3..ed6e3c8714daa 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -32,7 +32,8 @@ type ConfigVAlpha struct { AdvertiseRoutes []netip.Prefix `json:",omitempty"` DisableSNAT opt.Bool `json:",omitempty"` - NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert" + NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert" + NoStatefulFiltering opt.Bool `json:",omitempty"` PostureChecking opt.Bool `json:",omitempty"` RunSSHServer opt.Bool `json:",omitempty"` // Tailscale SSH @@ -50,6 +51,7 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) { if c == nil { return mp, nil } + mp.WantRunning = !c.Enabled.EqualBool(false) mp.WantRunningSet = mp.WantRunning || c.Enabled != "" if c.ServerURL != nil { @@ -98,6 +100,11 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) { mp.NoSNAT = c.DisableSNAT.EqualBool(true) mp.NoSNAT = true } + if c.NoStatefulFiltering != "" { + mp.NoStatefulFiltering = c.NoStatefulFiltering + mp.NoStatefulFilteringSet = true + } + if c.NetfilterMode != nil { m, err := preftype.ParseNetfilterMode(*c.NetfilterMode) if err != nil { diff --git a/k8s-operator/tsdns.go b/k8s-operator/tsdns.go deleted file mode 100644 index 0269bca11ccd5..0000000000000 --- a/k8s-operator/tsdns.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package kube - -const ( - Alpha1Version = "v1alpha1" - - DNSRecordsCMName = "dnsrecords" - DNSRecordsCMKey = "records.json" -) - -type Records struct { - // Version is the version of this Records configuration. Version is - // written by the operator, i.e when it first populates the Records. - // k8s-nameserver must verify that it knows how to parse a given - // version. - Version string `json:"version"` - // IP4 contains a mapping of DNS names to IPv4 address(es). - IP4 map[string][]string `json:"ip4"` -} diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go new file mode 100644 index 0000000000000..7d755f8eb6e82 --- /dev/null +++ b/k8s-operator/utils.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package kube + +import ( + "fmt" + + "tailscale.com/tailcfg" +) + +const ( + Alpha1Version = "v1alpha1" + + DNSRecordsCMName = "dnsrecords" + DNSRecordsCMKey = "records.json" +) + +type Records struct { + // Version is the version of this Records configuration. Version is + // written by the operator, i.e when it first populates the Records. + // k8s-nameserver must verify that it knows how to parse a given + // version. + Version string `json:"version"` + // IP4 contains a mapping of DNS names to IPv4 address(es). + IP4 map[string][]string `json:"ip4"` +} + +// TailscaledConfigFileNameForCap returns a tailscaled config file name in +// format expected by containerboot for the given CapVer. +func TailscaledConfigFileNameForCap(cap tailcfg.CapabilityVersion) string { + if cap < 95 { + return "tailscaled" + } + return fmt.Sprintf("cap-%v.hujson", cap) +} + +// CapVerFromFileName parses the capability version from a tailscaled +// config file name previously generated by TailscaledConfigFileNameForCap. 
+func CapVerFromFileName(name string) (tailcfg.CapabilityVersion, error) { + if name == "tailscaled" { + return 0, nil + } + var cap tailcfg.CapabilityVersion + _, err := fmt.Sscanf(name, "cap-%d.hujson", &cap) + return cap, err +} From 32cb8a3d96543ca8146df51b864d7aed8779780d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 10 May 2024 13:25:08 -0700 Subject: [PATCH 2/6] ipn/ipnlocal: simplify authURL vs authURLSticky, remove interact field The previous LocalBackend & CLI 'up' changes improved some stuff, but might've been too aggressive in some edge cases. This simplifies the authURL vs authURLSticky distinction and removes the interact field, which seemed to just be about duplicate URL suppression in the IPN bus, back from when the IPN bus served a single client at a time. This moves that suppression to a different spot. Fixes #12119 Updates #12028 Updates #12042 Change-Id: I1f8800b1e82ccc1c8a0d7abba559e7404ddf41e4 Signed-off-by: Brad Fitzpatrick (cherry picked from commit 8aa5c3534da70118d8878d7053957a10817052ed) --- ipn/ipnlocal/local.go | 38 +++++++++++++++++++++----------------- ipn/ipnlocal/state_test.go | 17 ++++++++--------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e2326e306b840..aaf37097a0cdd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -259,10 +259,8 @@ type LocalBackend struct { endpoints []tailcfg.Endpoint blocked bool keyExpired bool - authURL string // cleared on Notify - authURLSticky string // not cleared on Notify + authURL string // non-empty if not Running authURLTime time.Time // when the authURL was received from the control server - interact bool egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -785,7 +783,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { s.Version = version.Long() s.TUN = !b.sys.IsNetstack() s.BackendState = b.state.String() - s.AuthURL = b.authURLSticky + s.AuthURL = b.authURL if prefs := b.pm.CurrentPrefs(); prefs.Valid() && prefs.AutoUpdate().Check { s.ClientVersion = b.lastClientVersion } @@ -1139,7 +1137,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged := false prefs := b.pm.CurrentPrefs().AsStruct() netMap := b.netMap - interact := b.interact if prefs.ControlURL == "" { // Once we get a message from the control plane, set @@ -1158,7 +1155,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } if st.URL != "" { b.authURL = st.URL - b.authURLSticky = st.URL b.authURLTime = b.clock.Now() } if (wasBlocked || b.seamlessRenewalEnabled()) && st.LoginFinished() { @@ -1276,9 +1272,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) - if interact { - b.popBrowserAuthNow() - } + b.popBrowserAuthNow() } b.stateMachine() // This is currently (2020-07-28) necessary; conditionally disabling it is fragile! 
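The suppression that replaces the removed interact gate lands in WatchNotifications in the next hunk. In isolation the pattern is simply "drop a value identical to the last one emitted"; a generic sketch of that behavior (illustrative only - dedupConsecutive is a hypothetical helper, not LocalBackend code):

package main

import "fmt"

// dedupConsecutive drops an item when it equals the immediately preceding
// one, mirroring the lastURLPop suppression below: an identical auth URL is
// not popped twice in a row, but a changed URL still goes through.
func dedupConsecutive(in []string) []string {
	var out []string
	last := ""
	for _, s := range in {
		if s == last {
			continue
		}
		last = s
		out = append(out, s)
	}
	return out
}

func main() {
	urls := []string{"https://login/a", "https://login/a", "https://login/b", "https://login/a"}
	fmt.Println(dedupConsecutive(urls)) // [https://login/a https://login/b https://login/a]
}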
@@ -2281,8 +2275,8 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa if mask&ipn.NotifyInitialState != 0 { ini.SessionID = sessionID ini.State = ptr.To(b.state) - if b.state == ipn.NeedsLogin && b.authURLSticky != "" { - ini.BrowseToURL = ptr.To(b.authURLSticky) + if b.state == ipn.NeedsLogin && b.authURL != "" { + ini.BrowseToURL = ptr.To(b.authURL) } } if mask&ipn.NotifyInitialPrefs != 0 { @@ -2336,11 +2330,27 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa // TODO(marwan-at-work): streaming background logs? defer b.DeleteForegroundSession(sessionID) + var lastURLPop string // to dup suppress URL popups for { select { case <-ctx.Done(): return case n, ok := <-ch: + // URLs flow into Notify.BrowseToURL via two means: + // 1. From MapResponse.PopBrowserURL, which the controlclient already + // dup-suppresses if identical, so the suppression added here adds + // nothing for that path. + // + // 2. From the controlclient auth routes, on register. This makes sure + // we don't tell clients (mac, windows, android) to pop the same URL + // multiple times. + if n != nil && n.BrowseToURL != nil { + if v := *n.BrowseToURL; v == lastURLPop { + n.BrowseToURL = nil + } else { + lastURLPop = v + } + } if !ok || !fn(n) { return } @@ -2476,8 +2486,6 @@ func (b *LocalBackend) sendFileNotify() { func (b *LocalBackend) popBrowserAuthNow() { b.mu.Lock() url := b.authURL - b.interact = false - b.authURL = "" // but NOT clearing authURLSticky expired := b.keyExpired b.mu.Unlock() @@ -2805,7 +2813,6 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { if b.cc == nil { panic("LocalBackend.assertClient: b.cc == nil") } - b.interact = true url := b.authURL timeSinceAuthURLCreated := b.clock.Since(b.authURLTime) cc := b.cc @@ -4347,7 +4354,6 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock authURL := b.authURL if newState == ipn.Running { b.authURL = "" - b.authURLSticky = "" b.authURLTime = time.Time{} } else if oldState == ipn.Running { // Transitioning away from running. @@ -4607,7 +4613,6 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { } b.authURL = "" - b.authURLSticky = "" // When we clear the control client, stop any outstanding netmap expiry // timer; synthesizing a new netmap while we don't have a control @@ -4653,7 +4658,6 @@ func (b *LocalBackend) ResetForClientDisconnect() { } b.keyExpired = false b.authURL = "" - b.authURLSticky = "" b.authURLTime = time.Time{} b.activeLogin = "" b.resetDialPlan() diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ae88716197b1f..f1b11e737c05e 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -329,7 +329,7 @@ func TestStateMachine(t *testing.T) { (n.Prefs != nil && n.Prefs.Valid()) || n.BrowseToURL != nil || n.LoginFinished != nil { - logf("%v\n\n", n) + logf("%+v\n\n", n) notifies.put(n) } else { logf("(ignored) %v\n\n", n) @@ -406,7 +406,7 @@ func TestStateMachine(t *testing.T) { // the user needs to visit a login URL. t.Logf("\n\nLogin (url response)") - notifies.expect(2) + notifies.expect(3) b.EditPrefs(&ipn.MaskedPrefs{ ControlURLSet: true, Prefs: ipn.Prefs{ @@ -421,12 +421,15 @@ func TestStateMachine(t *testing.T) { // ...but backend eats that notification, because the user // didn't explicitly request interactive login yet, and // we're already in NeedsLogin state. 
- nn := notifies.drain(2) + nn := notifies.drain(3) c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + c.Assert(nn[2].BrowseToURL, qt.IsNotNil) + c.Assert(url1, qt.Equals, *nn[2].BrowseToURL) + c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } // Now we'll try an interactive login. // Since we provided an auth URL, this shouldn't ask control to do anything. Instead backend will emit an event indicating that the UI should browse to the given URL. t.Logf("\n\nLogin (interactive)") - notifies.expect(1) + notifies.expect(0) b.StartLoginInteractive(context.Background()) { - nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].BrowseToURL, qt.IsNotNil) - c.Assert(url1, qt.Equals, *nn[0].BrowseToURL) c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } @@ -453,9 +453,8 @@ notifies.expect(0) b.StartLoginInteractive(context.Background()) { - notifies.drain(0) // backend asks control for another login sequence - cc.assertCalls("Login") + cc.assertCalls() c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } From 9d2768a8b3485179e608a43f0be8c83883e85112 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 14 May 2024 13:24:11 +0100 Subject: [PATCH 3/6] util/linuxfw: fix IPv6 availability check for nftables (#12009) (#12123) * util/linuxfw: fix IPv6 NAT availability check for nftables When running the firewall in nftables mode, there is no need for a separate NAT availability check (unlike with iptables, there are no hosts that support nftables but not IPv6 NAT - see tailscale/tailscale#11353). This change fixes a firewall NAT availability check that was using the no-longer-set ipv6NATAvailable field by removing the field and using a method that, for nftables, just checks that IPv6 is available. Updates tailscale/tailscale#12008 Signed-off-by: Irbe Krumina (cherry picked from commit 7ef2f7213528d388c3e40bfbd1da0fa9bd32a58f) --- util/linuxfw/iptables_runner.go | 2 +- util/linuxfw/linuxfw.go | 7 ++ util/linuxfw/nftables_runner.go | 49 ++++------- util/linuxfw/nftables_runner_test.go | 127 +++++++++++++++++---------- 4 files changed, 107 insertions(+), 78 deletions(-) diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index dea764acae40c..e2e04af9c55b0 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -87,7 +87,7 @@ func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { } supportsV6Filter = checkSupportsV6Filter(ipt6, logf) supportsV6NAT = checkSupportsV6NAT(ipt6, logf) - logf("v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) + logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) } return &iptablesRunner{ ipt4: ipt4, diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index 0f47328f97309..2e8c1330b5dce 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -104,12 +104,19 @@ func getTailscaleSubnetRouteMark() []byte { return []byte{0x00, 0x04, 0x00, 0x00} } +// checkIPv6ForTest can be set in tests. +var checkIPv6ForTest func(logger.Logf) error + // checkIPv6 checks whether the system appears to have a working IPv6 // network stack. It returns an error explaining what looks wrong or // missing. 
It does not check that IPv6 is currently functional or // that there's a global address, just that the system would support // IPv6 if it were on an IPv6 network. func CheckIPv6(logf logger.Logf) error { + if f := checkIPv6ForTest; f != nil { + return f(logf) + } + _, err := os.Stat("/proc/sys/net/ipv6") if os.IsNotExist(err) { return err diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index 1bb508a983cef..aac28209ff760 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -41,8 +41,9 @@ type chainInfo struct { chainPolicy *nftables.ChainPolicy } +// nftable contains nat and filter tables for the given IP family (Proto). type nftable struct { - Proto nftables.TableFamily + Proto nftables.TableFamily // IPv4 or IPv6 Filter *nftables.Table Nat *nftables.Table } @@ -69,11 +70,10 @@ type nftable struct { // https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains type nftablesRunner struct { conn *nftables.Conn - nft4 *nftable - nft6 *nftable + nft4 *nftable // IPv4 tables + nft6 *nftable // IPv6 tables - v6Available bool - v6NATAvailable bool + v6Available bool // whether the host supports IPv6 } func (n *nftablesRunner) ensurePreroutingChain(dst netip.Addr) (*nftables.Table, *nftables.Chain, error) { @@ -598,6 +598,10 @@ func newNfTablesRunner(logf logger.Logf) (*nftablesRunner, error) { if err != nil { return nil, fmt.Errorf("nftables connection: %w", err) } + return newNfTablesRunnerWithConn(logf, conn), nil +} + +func newNfTablesRunnerWithConn(logf logger.Logf, conn *nftables.Conn) *nftablesRunner { nft4 := &nftable{Proto: nftables.TableFamilyIPv4} v6err := CheckIPv6(logf) @@ -609,8 +613,8 @@ func newNfTablesRunner(logf logger.Logf) (*nftablesRunner, error) { if supportsV6 { nft6 = &nftable{Proto: nftables.TableFamilyIPv6} - logf("v6nat availability: true") } + logf("netfilter running in nftables mode, v6 = %v", supportsV6) // TODO(KevinLiang10): convert iptables rule to nftable rules if they exist in the iptables @@ -619,7 +623,7 @@ func newNfTablesRunner(logf logger.Logf) (*nftablesRunner, error) { nft4: nft4, nft6: nft6, v6Available: supportsV6, - }, nil + } } // newLoadSaddrExpr creates a new nftables expression that loads the source @@ -837,24 +841,15 @@ func (n *nftablesRunner) DelLoopbackRule(addr netip.Addr) error { return n.conn.Flush() } -// getTables gets the available nftable in nftables runner. +// getTables returns tables for IP families that this host was determined to +// support (either IPv4 and IPv6 or just IPv4). func (n *nftablesRunner) getTables() []*nftable { - if n.v6Available { + if n.HasIPV6() { return []*nftable{n.nft4, n.nft6} } return []*nftable{n.nft4} } -// getNATTables gets the available nftable in nftables runner. -// If the system does not support IPv6 NAT, only the IPv4 nftable -// will be returned. -func (n *nftablesRunner) getNATTables() []*nftable { - if n.v6NATAvailable { - return n.getTables() - } - return []*nftable{n.nft4} -} - // AddChains creates custom Tailscale chains in netfilter via nftables // if the ts-chain doesn't already exist. 
func (n *nftablesRunner) AddChains() error { @@ -883,9 +878,7 @@ func (n *nftablesRunner) AddChains() error { if err = createChainIfNotExist(n.conn, chainInfo{filter, chainNameInput, chainTypeRegular, nil, nil, nil}); err != nil { return fmt.Errorf("create input chain: %w", err) } - } - for _, table := range n.getNATTables() { // Create the nat table if it doesn't exist, this table name is the same // as the name used by iptables-nft and ufw. We install rules into the // same conventional table so that `accept` verdicts from our jump @@ -923,7 +916,7 @@ const ( // can be used. It cleans up the dummy chains after creation. func (n *nftablesRunner) createDummyPostroutingChains() (retErr error) { polAccept := ptr.To(nftables.ChainPolicyAccept) - for _, table := range n.getNATTables() { + for _, table := range n.getTables() { nat, err := createTableIfNotExist(n.conn, table.Proto, tsDummyTableName) if err != nil { return fmt.Errorf("create nat table: %w", err) @@ -980,7 +973,7 @@ func (n *nftablesRunner) DelChains() error { return fmt.Errorf("delete chain: %w", err) } - if n.v6NATAvailable { + if n.HasIPV6NAT() { if err := deleteChainIfExists(n.conn, n.nft6.Nat, chainNamePostrouting); err != nil { return fmt.Errorf("delete chain: %w", err) } @@ -1046,9 +1039,7 @@ func (n *nftablesRunner) AddHooks() error { if err != nil { return fmt.Errorf("Addhook: %w", err) } - } - for _, table := range n.getNATTables() { postroutingChain, err := getChainFromTable(conn, table.Nat, "POSTROUTING") if err != nil { return fmt.Errorf("get INPUT chain: %w", err) @@ -1102,9 +1093,7 @@ func (n *nftablesRunner) DelHooks(logf logger.Logf) error { if err != nil { return fmt.Errorf("delhook: %w", err) } - } - for _, table := range n.getNATTables() { postroutingChain, err := getChainFromTable(conn, table.Nat, "POSTROUTING") if err != nil { return fmt.Errorf("get INPUT chain: %w", err) @@ -1612,9 +1601,7 @@ func (n *nftablesRunner) DelBase() error { return fmt.Errorf("get forward chain: %v", err) } conn.FlushChain(forwardChain) - } - for _, table := range n.getNATTables() { postrouteChain, err := getChainFromTable(conn, table.Nat, chainNamePostrouting) if err != nil { return fmt.Errorf("get postrouting chain v4: %v", err) @@ -1684,7 +1671,7 @@ func addMatchSubnetRouteMarkRule(conn *nftables.Conn, table *nftables.Table, cha func (n *nftablesRunner) AddSNATRule() error { conn := n.conn - for _, table := range n.getNATTables() { + for _, table := range n.getTables() { chain, err := getChainFromTable(conn, table.Nat, chainNamePostrouting) if err != nil { return fmt.Errorf("get postrouting chain v4: %w", err) @@ -1727,7 +1714,7 @@ func (n *nftablesRunner) DelSNATRule() error { &expr.Masq{}, } - for _, table := range n.getNATTables() { + for _, table := range n.getTables() { chain, err := getChainFromTable(conn, table.Nat, chainNamePostrouting) if err != nil { return fmt.Errorf("get postrouting chain v4: %w", err) diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index a545d3c3c59e7..ebf514c79b1f0 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -20,6 +20,8 @@ import ( "github.com/mdlayher/netlink" "github.com/vishvananda/netns" "tailscale.com/net/tsaddr" + "tailscale.com/tstest" + "tailscale.com/types/logger" ) // nfdump returns a hexdump of 4 bytes per line (like nft --debug=all), allowing @@ -503,19 +505,6 @@ func cleanupSysConn(t *testing.T, ns netns.NsHandle) { } } -func newFakeNftablesRunner(t *testing.T, conn *nftables.Conn) 
*nftablesRunner { - nft4 := &nftable{Proto: nftables.TableFamilyIPv4} - nft6 := &nftable{Proto: nftables.TableFamilyIPv6} - - return &nftablesRunner{ - conn: conn, - nft4: nft4, - nft6: nft6, - v6Available: true, - v6NATAvailable: true, - } -} - func checkChains(t *testing.T, conn *nftables.Conn, fam nftables.TableFamily, wantCount int) { t.Helper() got, err := conn.ListChainsOfTableFamily(fam) @@ -526,42 +515,76 @@ func checkChains(t *testing.T, conn *nftables.Conn, fam nftables.TableFamily, wa t.Fatalf("len(got) = %d, want %d", len(got), wantCount) } } - -func TestAddAndDelNetfilterChains(t *testing.T) { - conn := newSysConn(t) - checkChains(t, conn, nftables.TableFamilyIPv4, 0) - checkChains(t, conn, nftables.TableFamilyIPv6, 0) - - runner := newFakeNftablesRunner(t, conn) - if err := runner.AddChains(); err != nil { - t.Fatalf("runner.AddChains() failed: %v", err) - } - - tables, err := conn.ListTables() +func checkTables(t *testing.T, conn *nftables.Conn, fam nftables.TableFamily, wantCount int) { + t.Helper() + got, err := conn.ListTablesOfFamily(fam) if err != nil { - t.Fatalf("conn.ListTables() failed: %v", err) + t.Fatalf("conn.ListTablesOfFamily(%v) failed: %v", fam, err) } - - if len(tables) != 4 { - t.Fatalf("len(tables) = %d, want 4", len(tables)) + if len(got) != wantCount { + t.Fatalf("len(got) = %d, want %d", len(got), wantCount) } +} - checkChains(t, conn, nftables.TableFamilyIPv4, 6) - checkChains(t, conn, nftables.TableFamilyIPv6, 6) +func TestAddAndDelNetfilterChains(t *testing.T) { + type test struct { + hostHasIPv6 bool + initIPv4ChainCount int + initIPv6ChainCount int + ipv4TableCount int + ipv6TableCount int + ipv4ChainCount int + ipv6ChainCount int + ipv4ChainCountPostDelete int + ipv6ChainCountPostDelete int + } + tests := []test{ + { + hostHasIPv6: true, + initIPv4ChainCount: 0, + initIPv6ChainCount: 0, + ipv4TableCount: 2, + ipv6TableCount: 2, + ipv4ChainCount: 6, + ipv6ChainCount: 6, + ipv4ChainCountPostDelete: 3, + ipv6ChainCountPostDelete: 3, + }, + { // host without IPv6 support + ipv4TableCount: 2, + ipv4ChainCount: 6, + ipv4ChainCountPostDelete: 3, + }} + for _, tt := range tests { + t.Logf("running a test case for IPv6 support: %v", tt.hostHasIPv6) + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, tt.hostHasIPv6) + + // Check that we start off with no chains. + checkChains(t, conn, nftables.TableFamilyIPv4, tt.initIPv4ChainCount) + checkChains(t, conn, nftables.TableFamilyIPv6, tt.initIPv6ChainCount) - runner.DelChains() + if err := runner.AddChains(); err != nil { + t.Fatalf("runner.AddChains() failed: %v", err) + } - // The default chains should still be present. - checkChains(t, conn, nftables.TableFamilyIPv4, 3) - checkChains(t, conn, nftables.TableFamilyIPv6, 3) + // Check that the amount of tables for each IP family is as expected. + checkTables(t, conn, nftables.TableFamilyIPv4, tt.ipv4TableCount) + checkTables(t, conn, nftables.TableFamilyIPv6, tt.ipv6TableCount) - tables, err = conn.ListTables() - if err != nil { - t.Fatalf("conn.ListTables() failed: %v", err) - } + // Check that the amount of chains for each IP family is as expected. + checkChains(t, conn, nftables.TableFamilyIPv4, tt.ipv4ChainCount) + checkChains(t, conn, nftables.TableFamilyIPv6, tt.ipv6ChainCount) + + if err := runner.DelChains(); err != nil { + t.Fatalf("runner.DelChains() failed: %v", err) + } - if len(tables) != 4 { - t.Fatalf("len(tables) = %d, want 4", len(tables)) + // Test that the tables as well as the default chains are still present. 
+ checkChains(t, conn, nftables.TableFamilyIPv4, tt.ipv4ChainCountPostDelete) + checkChains(t, conn, nftables.TableFamilyIPv6, tt.ipv6ChainCountPostDelete) + checkTables(t, conn, nftables.TableFamilyIPv4, tt.ipv4TableCount) + checkTables(t, conn, nftables.TableFamilyIPv6, tt.ipv6TableCount) } } @@ -665,7 +688,8 @@ func checkChainRules(t *testing.T, conn *nftables.Conn, chain *nftables.Chain, w func TestNFTAddAndDelNetfilterBase(t *testing.T) { conn := newSysConn(t) - runner := newFakeNftablesRunner(t, conn) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + if err := runner.AddChains(); err != nil { t.Fatalf("AddChains() failed: %v", err) } @@ -759,7 +783,7 @@ func findLoopBackRule(conn *nftables.Conn, proto nftables.TableFamily, table *nf func TestNFTAddAndDelLoopbackRule(t *testing.T) { conn := newSysConn(t) - runner := newFakeNftablesRunner(t, conn) + runner := newFakeNftablesRunnerWithConn(t, conn, true) if err := runner.AddChains(); err != nil { t.Fatalf("AddChains() failed: %v", err) } @@ -817,7 +841,7 @@ func TestNFTAddAndDelLoopbackRule(t *testing.T) { func TestNFTAddAndDelHookRule(t *testing.T) { conn := newSysConn(t) - runner := newFakeNftablesRunner(t, conn) + runner := newFakeNftablesRunnerWithConn(t, conn, true) if err := runner.AddChains(); err != nil { t.Fatalf("AddChains() failed: %v", err) } @@ -868,11 +892,11 @@ func (t *testFWDetector) nftDetect() (int, error) { // postrouting chains are cleaned up. func TestCreateDummyPostroutingChains(t *testing.T) { conn := newSysConn(t) - runner := newFakeNftablesRunner(t, conn) + runner := newFakeNftablesRunnerWithConn(t, conn, true) if err := runner.createDummyPostroutingChains(); err != nil { t.Fatalf("createDummyPostroutingChains() failed: %v", err) } - for _, table := range runner.getNATTables() { + for _, table := range runner.getTables() { nt, err := getTableIfExists(conn, table.Proto, tsDummyTableName) if err != nil { t.Fatalf("getTableIfExists() failed: %v", err) } @@ -929,3 +953,14 @@ func TestPickFirewallModeFromInstalledRules(t *testing.T) { }) } } + +func newFakeNftablesRunnerWithConn(t *testing.T, conn *nftables.Conn, hasIPv6 bool) *nftablesRunner { + t.Helper() + if !hasIPv6 { + tstest.Replace(t, &checkIPv6ForTest, func(logger.Logf) error { + return errors.New("test: no IPv6") + }) + + } + return newNfTablesRunnerWithConn(t.Logf, conn) +} From 78566fd5cb1ac503f370de6018c4892d900c9d91 Mon Sep 17 00:00:00 2001 From: Nick O'Neill Date: Tue, 14 May 2024 13:44:56 -0700 Subject: [PATCH 4/6] VERSION.txt: this is v1.66.2 Signed-off-by: Nick O'Neill --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 0403bed10c327..21dc31f09de37 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.66.1 +1.66.2 From 8ff13e9b7c57144eab1d182897a2db87b12c5777 Mon Sep 17 00:00:00 2001 From: Andrea Gottardo Date: Tue, 14 May 2024 12:15:13 -0700 Subject: [PATCH 5/6] version: fix macOS uploads by increasing build number prefix (#12134) Fixes tailscale/corp#19979 A build with version number 275 was uploaded to the App Store without bumping OSS first. The presence of that build is causing any 274.* build to be rejected. To address this, added +1 to the year component, which means new builds will use the 275.* prefix. 
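For concreteness, the scheme is purely date-derived: the prefix is year minus 1750 (now plus 1), followed by the day of the year, followed by the seconds elapsed in the day. A small standalone sketch mirroring the format string in the diff below (xcodeMacOSVersion is a hypothetical helper, not mkversion API):

package main

import (
	"fmt"
	"time"
)

// xcodeMacOSVersion mirrors mkOutput's format string: (year-1750)+1, then
// day-of-year, then seconds elapsed since midnight.
func xcodeMacOSVersion(t time.Time) string {
	secs := t.Hour()*60*60 + t.Minute()*60 + t.Second()
	return fmt.Sprintf("%d.%d.%d", t.Year()-1750+1, t.YearDay(), secs)
}

func main() {
	t := time.Date(2024, time.May, 14, 1, 2, 3, 0, time.UTC)
	fmt.Println(xcodeMacOSVersion(t)) // 275.135.3723: new builds get the 275.* prefix
}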
Signed-off-by: Andrea Gottardo (cherry picked from commit 60266be29878ceba887ea9195c257fcc29593cfe) --- version/mkversion/mkversion.go | 5 ++++- version/mkversion/mkversion_test.go | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/version/mkversion/mkversion.go b/version/mkversion/mkversion.go index 49f97be534f78..7d0a0f5eba49e 100644 --- a/version/mkversion/mkversion.go +++ b/version/mkversion/mkversion.go @@ -291,7 +291,10 @@ func mkOutput(v verInfo) (VersionInfo, error) { // so that we're still in the same range. This way if Apple goes back to // auto-incrementing the number for us, we can go back to it with // reasonable-looking numbers. - ret.XcodeMacOS = fmt.Sprintf("%d.%d.%d", otherTime.Year()-1750, otherTime.YearDay(), otherTime.Hour()*60*60+otherTime.Minute()*60+otherTime.Second()) + // In May 2024, a build with version number 275 was uploaded to the App Store + // by mistake, causing any 274.* build to be rejected. To address this, +1 was + // added, causing all builds to use the 275.* prefix. + ret.XcodeMacOS = fmt.Sprintf("%d.%d.%d", otherTime.Year()-1750+1, otherTime.YearDay(), otherTime.Hour()*60*60+otherTime.Minute()*60+otherTime.Second()) } return ret, nil diff --git a/version/mkversion/mkversion_test.go b/version/mkversion/mkversion_test.go index 1ae2372b58f5e..210d3053a14a3 100644 --- a/version/mkversion/mkversion_test.go +++ b/version/mkversion/mkversion_test.go @@ -81,7 +81,7 @@ func TestMkversion(t *testing.T) { VERSION_TRACK="unstable" VERSION_EXTRA_HASH="defghi" VERSION_XCODE="101.15.129" - VERSION_XCODE_MACOS="273.27.3723" + VERSION_XCODE_MACOS="274.27.3723" VERSION_WINRES="1,15,129,0" VERSION_MSIPRODUCT_AMD64="89C96952-1FB8-5A4D-B02E-16A8060C56AA" VERSION_MSIPRODUCT_ARM64="DB1A2E86-66C4-5CEC-8F4C-7DB805370F3A" @@ -104,7 +104,7 @@ func TestMkversion(t *testing.T) { VERSION_TRACK="unstable" VERSION_EXTRA_HASH="defghi" VERSION_XCODE="101.15.129" - VERSION_XCODE_MACOS="273.27.3723" + VERSION_XCODE_MACOS="274.27.3723" VERSION_WINRES="1,15,129,0" VERSION_MSIPRODUCT_AMD64="89C96952-1FB8-5A4D-B02E-16A8060C56AA" VERSION_MSIPRODUCT_ARM64="DB1A2E86-66C4-5CEC-8F4C-7DB805370F3A" From eae73f821381ce653468ba3c4eec3453c0808953 Mon Sep 17 00:00:00 2001 From: Nick O'Neill Date: Tue, 14 May 2024 14:16:13 -0700 Subject: [PATCH 6/6] VERSION.txt: this is v1.66.3 Signed-off-by: Nick O'Neill --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 21dc31f09de37..3cb930ab0301b 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.66.2 +1.66.3