diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 28a836e975273..f36b0259f1091 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -192,8 +192,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) return false, nil } + customTLS, err := customTLSForIngress(ctx, r.Client, ing) + if err != nil { + return false, fmt.Errorf("failed to configure custom TLS for Ingress: %w", err) + } - if !IsHTTPSEnabledOnTailnet(r.tsnetServer) { + if customTLS == nil && !IsHTTPSEnabledOnTailnet(r.tsnetServer) { r.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } @@ -250,8 +254,13 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if err != nil { return false, fmt.Errorf("error determining DNS name for service: %w", err) } + httpsHost := dnsName + if customTLS != nil { + httpsHost = customTLS.host + } + serviceHosts := ingressHTTPSHosts(dnsName, customTLS) - if err = r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { + if err = r.ensureCertResources(ctx, pg, dnsName, ing, customTLS); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } @@ -264,8 +273,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin logger.Infof("no Ingress serve config ConfigMap found, unable to update serve config. 
Ensure that ProxyGroup is healthy.") return svcsChanged, nil } - ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName)) - handlers, err := handlersForIngress(ctx, ing, r.Client, r.recorder, dnsName, logger) + handlers, err := handlersForIngress(ctx, ing, r.Client, r.recorder, httpsHost, logger) if err != nil { return false, fmt.Errorf("failed to get handlers for Ingress: %w", err) } @@ -275,40 +283,34 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin HTTPS: true, }, }, - Web: map[ipn.HostPort]*ipn.WebServerConfig{ - ep: { - Handlers: handlers, - }, - }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{}, + } + for _, host := range serviceHosts { + ingCfg.Web[ipn.HostPort(fmt.Sprintf("%s:443", host))] = &ipn.WebServerConfig{Handlers: handlers} } // Add HTTP endpoint if configured. if isHTTPEndpointEnabled(ing) { logger.Infof("exposing Ingress over HTTP") - epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName)) ingCfg.TCP[80] = &ipn.TCPPortHandler{ HTTP: true, } - ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ - Handlers: handlers, + for _, host := range serviceHosts { + ingCfg.Web[ipn.HostPort(fmt.Sprintf("%s:80", host))] = &ipn.WebServerConfig{Handlers: handlers} } if isHTTPRedirectEnabled(ing) { logger.Warnf("Both HTTP endpoint and HTTP redirect flags are enabled: ignoring HTTP redirect.") } } else if isHTTPRedirectEnabled(ing) { logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") - epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName)) ingCfg.TCP[80] = &ipn.TCPPortHandler{HTTP: true} - ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ - Handlers: map[string]*ipn.HTTPHandler{}, - } - web80 := ingCfg.Web[epHTTP] - for mountPoint := range handlers { - // We send a 301 - Moved Permanently redirect from HTTP to HTTPS - redirectURL := "301:https://${HOST}${REQUEST_URI}" - logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) - web80.Handlers[mountPoint] = &ipn.HTTPHandler{ - Redirect: redirectURL, + for _, 
host := range serviceHosts { + epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", host)) + ingCfg.Web[epHTTP] = &ipn.WebServerConfig{Handlers: map[string]*ipn.HTTPHandler{}} + for mountPoint := range handlers { + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + ingCfg.Web[epHTTP].Handlers[mountPoint] = &ipn.HTTPHandler{Redirect: redirectURL} } } } @@ -370,7 +372,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { mode = serviceAdvertisementHTTPAndHTTPS } - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, mode, pg); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, mode, pg, customTLS != nil); err != nil { return false, fmt.Errorf("failed to update tailscaled config: %w", err) } @@ -391,8 +393,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if err != nil { return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) } - // If TLS certs have not been issued (yet), do not set port 443. - if hasCerts { + if customTLS != nil || hasCerts { ports = append(ports, networkingv1.IngressPortStatus{ Protocol: "TCP", Port: 443, @@ -407,7 +408,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // Set Ingress status hostname only if either port 443 or 80 is advertised. var hostname string if len(ports) != 0 { - hostname = dnsName + hostname = httpsHost } ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ { @@ -485,7 +486,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger } // Make sure the Tailscale Service is not advertised in tailscaled or serve config. 
- if err = r.maybeUpdateAdvertiseServicesConfig(ctx, tsSvcName, serviceAdvertisementOff, pg); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, tsSvcName, serviceAdvertisementOff, pg, false); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } @@ -571,7 +572,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 4. Unadvertise the Tailscale Service in tailscaled config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, serviceAdvertisementOff, pg); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, serviceAdvertisementOff, pg, false); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } @@ -646,6 +647,7 @@ func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { // validateIngress validates that the Ingress is properly configured. // Currently validates: // - Any tags provided via tailscale.com/tags annotation are valid Tailscale ACL tags +// - Any accept-app-caps provided via tailscale.com/accept-app-caps annotation are valid capability names // - The derived hostname is a valid DNS label // - The referenced ProxyGroup exists and is of type 'ingress' // - Ingress' TLS block is invalid @@ -658,6 +660,16 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki errs = append(errs, fmt.Errorf("Ingress contains invalid tags: %v", strings.Join(violations, ","))) } + // Validate accept-app-caps if present + if raw, ok := ing.Annotations[AnnotationAcceptAppCaps]; ok && raw != "" { + for _, p := range strings.Split(raw, ",") { + p = strings.TrimSpace(p) + if p != "" && !validAppCap.MatchString(p) { + errs = append(errs, fmt.Errorf("invalid app capability %q", p)) + } + } + } + // Validate TLS configuration if len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { errs = append(errs, fmt.Errorf("Ingress contains 
invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS)) @@ -755,7 +767,7 @@ const ( serviceAdvertisementHTTPAndHTTPS // Both ports 80 and 443 should be advertised ) -func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, pg *tsapi.ProxyGroup) (err error) { +func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, pg *tsapi.ProxyGroup, customTLSReady bool) (err error) { // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { @@ -775,7 +787,7 @@ func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) } shouldBeAdvertised := (mode == serviceAdvertisementHTTPAndHTTPS) || - (mode == serviceAdvertisementHTTPS && hasCert) // if we only expose port 443 and don't have certs (yet), do not advertise + (mode == serviceAdvertisementHTTPS && (hasCert || customTLSReady)) // if we only expose port 443 and don't have certs (yet), do not advertise for _, secret := range secrets.Items { var updated bool @@ -937,15 +949,19 @@ func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { // (domain) is a valid Kubernetes resource name. 
// https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names -func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string, ing *networkingv1.Ingress) error { +func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string, ing *networkingv1.Ingress, customTLS *ingressCustomTLS) error { secret := certSecret(pg.Name, r.tsNamespace, domain, ing) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) { // Labels might have changed if the Ingress has been updated to use a // different ProxyGroup. s.Labels = secret.Labels + s.Type = secret.Type }); err != nil { return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) } + if err := ensureCustomTLSStateSecrets(ctx, r.Client, r.tsNamespace, pg, customTLS); err != nil { + return fmt.Errorf("failed to ensure custom TLS state Secrets: %w", err) + } role := certSecretRole(pg.Name, r.tsNamespace, domain) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { // Labels might have changed if the Ingress has been updated to use a @@ -1046,18 +1062,15 @@ func certSecretRoleBinding(pg *tsapi.ProxyGroup, namespace, domain string) *rbac // certSecret creates a Secret that will store the TLS certificate and private // key for the given domain. Domain must be a valid Kubernetes resource name. + func certSecret(pgName, namespace, domain string, parent client.Object) *corev1.Secret { labels := certResourceLabels(pgName, domain) labels[kubetypes.LabelSecretType] = kubetypes.LabelSecretTypeCerts // Labels that let us identify the Ingress resource lets us reconcile // the Ingress when the TLS Secret is updated (for example, when TLS // certs have been provisioned). 
- labels[LabelParentType] = strings.ToLower(parent.GetObjectKind().GroupVersionKind().Kind) - labels[LabelParentName] = parent.GetName() - if ns := parent.GetNamespace(); ns != "" { - labels[LabelParentNamespace] = ns - } - return &corev1.Secret{ + mkParentLabels(&labels, parent) + secret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Secret", @@ -1073,6 +1086,7 @@ func certSecret(pgName, namespace, domain string, parent client.Object) *corev1. }, Type: corev1.SecretTypeTLS, } + return secret } func certResourceLabels(pgName, domain string) map[string]string { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 33e27ef371d90..25d4940ba8c04 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -32,6 +32,8 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" + "tailscale.com/types/ptr" + "tailscale.com/util/mak" ) func TestIngressPGReconciler(t *testing.T) { @@ -1187,6 +1189,183 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { } } +func TestIngressPGReconciler_AcceptAppCaps(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create backend Service that the Ingress will route to + backendSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8080, + }, + }, + }, + } + mustCreate(t, fc, backendSvc) + + // Create test Ingress with accept-app-caps annotation + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/accept-app-caps": 
"example.com/cap/monitoring,example.com/cap/admin", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Reconcile + expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify Tailscale Service + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) + + // Verify the serve config has AcceptAppCaps on handlers + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + cfg := &ipn.ServeConfig{} + if err := json.Unmarshal(cm.BinaryData["serve-config.json"], cfg); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + + svc := cfg.Services[tailcfg.ServiceName("svc:my-svc")] + if svc == nil { + t.Fatal("service svc:my-svc not found in serve config") + } + + ep := ipn.HostPort("my-svc.ts.net:443") + webCfg := svc.Web[ep] + if webCfg == nil { + t.Fatalf("web config for %q not found", ep) + } + + handler := webCfg.Handlers["/"] + if handler == nil { + t.Fatal("handler for path / not found") + } + + wantCaps := []tailcfg.PeerCapability{"example.com/cap/monitoring", "example.com/cap/admin"} + if !reflect.DeepEqual(handler.AcceptAppCaps, wantCaps) { + t.Errorf("AcceptAppCaps = %v, want %v", handler.AcceptAppCaps, wantCaps) + } +} + +func TestIngressPGReconciler_CustomTLSSecret(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + backendSvc := &corev1.Service{ + ObjectMeta: 
metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{{Port: 8080}}, + }, + } + mustCreate(t, fc, backendSvc) + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "wildcard-cert", Namespace: "default"}, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + corev1.TLSCertKey: []byte("fake-cert"), + corev1.TLSPrivateKeyKey: []byte("fake-key"), + }, + }) + + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{Number: 8080}, + }, + }, + TLS: []networkingv1.IngressTLS{{Hosts: []string{"zerg.zergrush.dev"}, SecretName: "wildcard-cert"}}, + }, + } + mustCreate(t, fc, ing) + + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyTailscaleService(t, ft, "svc:zerg", []string{"tcp:443"}) + verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:zerg"}) + + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test-pg-ingress-config", Namespace: "operator-ns"}, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + cfg := &ipn.ServeConfig{} + if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil { + t.Fatalf("unmarshaling serve config: %v", err) + } + svc := cfg.Services[tailcfg.ServiceName("svc:zerg")] + if svc == nil { + t.Fatal("service svc:zerg not found in serve config") + } + if _, ok := svc.Web[ipn.HostPort("zerg.zergrush.dev:443")]; !ok { + t.Fatalf("expected custom HTTPS host in service config, got keys %v", 
maps.Keys(svc.Web)) + } + if _, ok := svc.Web[ipn.HostPort("zerg.ts.net:443")]; !ok { + t.Fatalf("expected MagicDNS HTTPS host in service config, got keys %v", maps.Keys(svc.Web)) + } + + expectedTLSSecret := certSecret("test-pg", "operator-ns", "zerg.ts.net", ing) + expectEqual(t, fc, expectedTLSSecret) + expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "zerg.ts.net")) + pg := &tsapi.ProxyGroup{ObjectMeta: metav1.ObjectMeta{Name: "test-pg"}} + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "zerg.ts.net")) + + stateSecret := &corev1.Secret{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test-pg-0", Namespace: "operator-ns"}, stateSecret); err != nil { + t.Fatalf("getting state Secret: %v", err) + } + expectedStateSecret := stateSecret.DeepCopy() + mak.Set(&expectedStateSecret.Data, "zerg.zergrush.dev.crt", []byte("fake-cert")) + mak.Set(&expectedStateSecret.Data, "zerg.zergrush.dev.key", []byte("fake-key")) + expectEqual(t, fc, expectedStateSecret) +} + func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { tsIngressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 4952e789f6a02..67ae14c5822b5 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -8,6 +8,7 @@ package main import ( "context" "fmt" + "regexp" "slices" "strings" "sync" @@ -25,6 +26,7 @@ import ( "tailscale.com/ipn" "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" "tailscale.com/types/opt" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" @@ -162,33 +164,34 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga gaugeIngressResources.Set(int64(a.managedIngresses.Len())) a.mu.Unlock() - if !IsHTTPSEnabledOnTailnet(a.ssr.tsnetServer) { + customTLS, err := customTLSForIngress(ctx, a.Client, ing) + if err != nil { + return fmt.Errorf("failed to 
configure custom TLS for ingress: %w", err) + } + + if customTLS == nil && !IsHTTPSEnabledOnTailnet(a.ssr.tsnetServer) { a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } - // magic443 is a fake hostname that we can use to tell containerboot to swap - // out with the real hostname once it's known. - const magic443 = "${TS_CERT_DOMAIN}:443" + httpsHosts := ingressHTTPSHosts("${TS_CERT_DOMAIN}", customTLS) sc := &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{ 443: { HTTPS: true, }, }, - Web: map[ipn.HostPort]*ipn.WebServerConfig{ - magic443: { - Handlers: map[string]*ipn.HTTPHandler{}, - }, - }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{}, + } + for _, host := range httpsHosts { + sc.Web[ipn.HostPort(host+":443")] = &ipn.WebServerConfig{Handlers: map[string]*ipn.HTTPHandler{}} } if opt.Bool(ing.Annotations[AnnotationFunnel]).EqualBool(true) { - sc.AllowFunnel = map[ipn.HostPort]bool{ - magic443: true, + sc.AllowFunnel = map[ipn.HostPort]bool{} + for _, host := range httpsHosts { + sc.AllowFunnel[ipn.HostPort(host+":443")] = true } } - web := sc.Web[magic443] - var tlsHost string // hostname or FQDN or empty if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 { tlsHost = ing.Spec.TLS[0].Hosts[0] @@ -197,30 +200,28 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga if err != nil { return fmt.Errorf("failed to get handlers for ingress: %w", err) } - web.Handlers = handlers - if len(web.Handlers) == 0 { + if len(handlers) == 0 { logger.Warn("Ingress contains no valid backends") a.recorder.Eventf(ing, corev1.EventTypeWarning, "NoValidBackends", "no valid backends") return nil } + for _, host := range httpsHosts { + sc.Web[ipn.HostPort(host+":443")].Handlers = handlers + } if isHTTPRedirectEnabled(ing) { logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") - const magic80 = "${TS_CERT_DOMAIN}:80" 
sc.TCP[80] = &ipn.TCPPortHandler{HTTP: true} - sc.Web[magic80] = &ipn.WebServerConfig{ - Handlers: map[string]*ipn.HTTPHandler{}, - } - if sc.AllowFunnel != nil && sc.AllowFunnel[magic443] { - sc.AllowFunnel[magic80] = true - } - web80 := sc.Web[magic80] - for mountPoint := range handlers { - // We send a 301 - Moved Permanently redirect from HTTP to HTTPS - redirectURL := "301:https://${HOST}${REQUEST_URI}" - logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) - web80.Handlers[mountPoint] = &ipn.HTTPHandler{ - Redirect: redirectURL, + for _, host := range httpsHosts { + host80 := ipn.HostPort(host + ":80") + sc.Web[host80] = &ipn.WebServerConfig{Handlers: map[string]*ipn.HTTPHandler{}} + if sc.AllowFunnel != nil { + sc.AllowFunnel[host80] = true + } + for mountPoint := range handlers { + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + sc.Web[host80].Handlers[mountPoint] = &ipn.HTTPHandler{Redirect: redirectURL} } } } @@ -244,6 +245,9 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga proxyType: proxyTypeIngressResource, LoginServer: a.ssr.loginServer, } + if customTLS != nil { + sts.CustomTLSCerts = map[string]*corev1.Secret{customTLS.host: customTLS.secret} + } if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" { sts.ForwardClusterTrafficViaL7IngressProxy = true @@ -257,19 +261,28 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga if err != nil { return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err) } + hasHTTPS := customTLS == nil + if customTLS != nil { + hasHTTPS = true + } ing.Status.LoadBalancer.Ingress = nil for _, dev := range devices { - if dev.ingressDNSName == "" { + if dev.ingressDNSName == "" && customTLS == nil { continue } - logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) - 
ports := []networkingv1.IngressPortStatus{ - { + hostname := dev.ingressDNSName + if customTLS != nil { + hostname = customTLS.host + } + logger.Debugf("setting Ingress hostname to %q", hostname) + ports := []networkingv1.IngressPortStatus{} + if hasHTTPS { + ports = append(ports, networkingv1.IngressPortStatus{ Protocol: "TCP", Port: 443, - }, + }) } if isHTTPRedirectEnabled(ing) { ports = append(ports, networkingv1.IngressPortStatus{ @@ -278,7 +291,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga }) } ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ - Hostname: dev.ingressDNSName, + Hostname: hostname, Ports: ports, }) } @@ -320,7 +333,37 @@ func validateIngressClass(ctx context.Context, cl client.Client, ingressClassNam return nil } +// validAppCap matches application capability names of the form {domain}/{name}. +// Both parts must use the (simplified) FQDN label character set. +// The "name" can contain forward slashes. +var validAppCap = regexp.MustCompile(`^([\pL\pN-]+\.)+[\pL\pN-]+\/[\pL\pN-/]+$`) + +// parseAcceptAppCaps reads the AnnotationAcceptAppCaps annotation from the +// Ingress, splits it by comma, validates each capability name, and returns the +// valid ones. Invalid capabilities are skipped with a warning event. 
+func parseAcceptAppCaps(ing *networkingv1.Ingress, rec record.EventRecorder) []tailcfg.PeerCapability { + raw, ok := ing.Annotations[AnnotationAcceptAppCaps] + if !ok || raw == "" { + return nil + } + parts := strings.Split(raw, ",") + var caps []tailcfg.PeerCapability + for _, p := range parts { + p = strings.TrimSpace(p) + if p == "" { + continue + } + if !validAppCap.MatchString(p) { + rec.Eventf(ing, corev1.EventTypeWarning, "InvalidAppCapability", "ignoring invalid app capability %q", p) + continue + } + caps = append(caps, tailcfg.PeerCapability(p)) + } + return caps +} + func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl client.Client, rec record.EventRecorder, tlsHost string, logger *zap.SugaredLogger) (handlers map[string]*ipn.HTTPHandler, err error) { + acceptAppCaps := parseAcceptAppCaps(ing, rec) addIngressBackend := func(b *networkingv1.IngressBackend, path string) { if path == "" { path = "/" @@ -364,7 +407,8 @@ func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl clien proto = "https+insecure://" } mak.Set(&handlers, path, &ipn.HTTPHandler{ - Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path, + Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path, + AcceptAppCaps: acceptAppCaps, }) } addIngressBackend(ing.Spec.DefaultBackend, "/") diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 1381193065093..0966cd7b38b8c 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -8,6 +8,7 @@ package main import ( "context" "reflect" + "strings" "testing" "go.uber.org/zap" @@ -24,7 +25,9 @@ import ( "tailscale.com/ipn" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -56,7 +59,21 @@ func TestTailscaleIngress(t *testing.T) { expectReconciled(t, ingR, "default", "test") - fullName, shortName 
:= findGenName(t, fc, "default", "test", "ingress") + secretList := &corev1.SecretList{} + if err := fc.List(t.Context(), secretList, client.InNamespace("operator-ns"), client.MatchingLabels(childResourceLabels("test", "default", "ingress"))); err != nil { + t.Fatalf("listing generated secrets: %v", err) + } + fullName := "" + for _, secret := range secretList.Items { + if strings.HasSuffix(secret.Name, "-0") { + fullName = secret.Name + break + } + } + if fullName == "" { + t.Fatalf("failed to find generated state Secret among %v", secretList.Items) + } + shortName := strings.TrimSuffix(fullName, "-0") opts := configOpts{ replicas: new(int32(1)), stsName: shortName, @@ -285,7 +302,21 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { expectReconciled(t, ingR, "default", "test") - fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + secretList := &corev1.SecretList{} + if err := fc.List(t.Context(), secretList, client.InNamespace("operator-ns"), client.MatchingLabels(childResourceLabels("test", "default", "ingress"))); err != nil { + t.Fatalf("listing generated secrets: %v", err) + } + fullName := "" + for _, secret := range secretList.Items { + if strings.HasSuffix(secret.Name, "-0") { + fullName = secret.Name + break + } + } + if fullName == "" { + t.Fatalf("failed to find generated state Secret among %v", secretList.Items) + } + shortName := strings.TrimSuffix(fullName, "-0") opts := configOpts{ stsName: shortName, secretName: fullName, @@ -387,7 +418,21 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { logger: zl.Sugar(), } expectReconciled(t, ingR, "default", "test") - fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + secretList := &corev1.SecretList{} + if err := fc.List(t.Context(), secretList, client.InNamespace("operator-ns"), client.MatchingLabels(childResourceLabels("test", "default", "ingress"))); err != nil { + t.Fatalf("listing generated secrets: %v", err) + } + fullName := "" + for _, 
secret := range secretList.Items { + if strings.HasSuffix(secret.Name, "-0") { + fullName = secret.Name + break + } + } + if fullName == "" { + t.Fatalf("failed to find generated state Secret among %v", secretList.Items) + } + shortName := strings.TrimSuffix(fullName, "-0") opts := configOpts{ stsName: shortName, secretName: fullName, @@ -870,7 +915,21 @@ func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { expectReconciled(t, ingR, "default", "test") - fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + secretList := &corev1.SecretList{} + if err := fc.List(t.Context(), secretList, client.InNamespace("operator-ns"), client.MatchingLabels(childResourceLabels("test", "default", "ingress"))); err != nil { + t.Fatalf("listing generated secrets: %v", err) + } + fullName := "" + for _, secret := range secretList.Items { + if strings.HasSuffix(secret.Name, "-0") { + fullName = secret.Name + break + } + } + if fullName == "" { + t.Fatalf("failed to find generated state Secret among %v", secretList.Items) + } + shortName := strings.TrimSuffix(fullName, "-0") opts := configOpts{ replicas: new(int32(1)), stsName: shortName, @@ -937,3 +996,241 @@ func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { t.Errorf("incorrect status ports after removing redirect: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) } } + +func TestTailscaleIngressWithAcceptAppCaps(t *testing.T) { + fc := fake.NewFakeClient(ingressClass()) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ingressClassName: "tailscale", + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + // 1. 
Create Ingress with accept-app-caps annotation + ing := ingress() + mak.Set(&ing.Annotations, AnnotationAcceptAppCaps, "example.com/cap/monitoring,example.com/cap/admin") + mustCreate(t, fc, ing) + mustCreate(t, fc, service()) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + wantCaps := []tailcfg.PeerCapability{"example.com/cap/monitoring", "example.com/cap/admin"} + opts := configOpts{ + replicas: ptr.To[int32](1), + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "default-test", + app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://1.2.3.4:8080/", + AcceptAppCaps: wantCaps, + }, + }}, + }, + }, + } + + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) +} + +func TestTailscaleIngressWithCustomTLSSecret(t *testing.T) { + fc := fake.NewFakeClient(ingressClass()) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ingressClassName: "tailscale", + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + ing := ingress() + ing.Spec.TLS = []networkingv1.IngressTLS{{Hosts: []string{"zerg.zergrush.dev"}, SecretName: "wildcard-cert"}} + srcTLS := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "wildcard-cert", Namespace: "default"}, + Type: corev1.SecretTypeTLS, + Data: 
map[string][]byte{ + corev1.TLSCertKey: []byte("fake-cert"), + corev1.TLSPrivateKeyKey: []byte("fake-key"), + }, + } + mustCreate(t, fc, ing) + mustCreate(t, fc, service()) + mustCreate(t, fc, srcTLS) + + expectReconciled(t, ingR, "default", "test") + + secretList := &corev1.SecretList{} + if err := fc.List(t.Context(), secretList, client.InNamespace("operator-ns"), client.MatchingLabels(childResourceLabels("test", "default", "ingress"))); err != nil { + t.Fatalf("listing generated secrets: %v", err) + } + fullName := "" + for _, secret := range secretList.Items { + if strings.HasSuffix(secret.Name, "-0") { + fullName = secret.Name + break + } + } + if fullName == "" { + t.Fatalf("failed to find generated state Secret among %v", secretList.Items) + } + shortName := strings.TrimSuffix(fullName, "-0") + opts := configOpts{ + replicas: ptr.To[int32](1), + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "zerg", + app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + "zerg.zergrush.dev:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, + secretExtraData: map[string][]byte{ + "zerg.zergrush.dev.crt": []byte("fake-cert"), + "zerg.zergrush.dev.key": []byte("fake-key"), + }, + } + + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + + mustUpdate(t, fc, "operator-ns", fullName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "device_fqdn", []byte("zerg.tailnetxyz.ts.net")) + }) + expectReconciled(t, ingR, "default", "test") + + 
expectedIngress := ingress() + expectedIngress.Spec.TLS = []networkingv1.IngressTLS{{Hosts: []string{"zerg.zergrush.dev"}, SecretName: "wildcard-cert"}} + expectedIngress.Finalizers = append(expectedIngress.Finalizers, "tailscale.com/finalizer") + expectedIngress.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{{ + Hostname: "zerg.zergrush.dev", + Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}, + }}, + } + expectEqual(t, fc, expectedIngress) +} + +func TestParseAcceptAppCaps(t *testing.T) { + tests := []struct { + name string + annotation string + wantCaps []tailcfg.PeerCapability + wantEvents int // number of warning events expected + }{ + { + name: "empty", + annotation: "", + wantCaps: nil, + }, + { + name: "single_valid", + annotation: "example.com/cap/monitoring", + wantCaps: []tailcfg.PeerCapability{"example.com/cap/monitoring"}, + }, + { + name: "multiple_valid", + annotation: "example.com/cap/monitoring,example.com/cap/admin", + wantCaps: []tailcfg.PeerCapability{ + "example.com/cap/monitoring", + "example.com/cap/admin", + }, + }, + { + name: "whitespace", + annotation: " example.com/cap/monitoring , example.com/cap/admin ", + wantCaps: []tailcfg.PeerCapability{ + "example.com/cap/monitoring", + "example.com/cap/admin", + }, + }, + { + name: "invalid_skipped", + annotation: "example.com/cap/valid,not-a-cap,another.com/cap/ok", + wantCaps: []tailcfg.PeerCapability{ + "example.com/cap/valid", + "another.com/cap/ok", + }, + wantEvents: 1, + }, + { + name: "all_invalid", + annotation: "bad,also-bad", + wantCaps: nil, + wantEvents: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := record.NewFakeRecorder(10) + ing := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + } + if tt.annotation != "" { + mak.Set(&ing.Annotations, AnnotationAcceptAppCaps, tt.annotation) + } + got := 
parseAcceptAppCaps(ing, rec) + if !reflect.DeepEqual(got, tt.wantCaps) { + t.Errorf("parseAcceptAppCaps() = %v, want %v", got, tt.wantCaps) + } + // Drain events and count warnings + close(rec.Events) + var gotEvents int + for range rec.Events { + gotEvents++ + } + if gotEvents != tt.wantEvents { + t.Errorf("got %d warning events, want %d", gotEvents, tt.wantEvents) + } + }) + } +} diff --git a/cmd/k8s-operator/ingress_tls.go b/cmd/k8s-operator/ingress_tls.go new file mode 100644 index 0000000000000..18b423f2babd7 --- /dev/null +++ b/cmd/k8s-operator/ingress_tls.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/util/mak" +) + +const indexIngressTLSSecret = ".spec.tls.secretName" + +type ingressCustomTLS struct { + host string + secretName string + secret *corev1.Secret +} + +func customTLSForIngress(ctx context.Context, cl client.Client, ing *networkingv1.Ingress) (*ingressCustomTLS, error) { + host := ingressTLSHost(ing) + if host == "" || len(ing.Spec.TLS) == 0 || ing.Spec.TLS[0].SecretName == "" { + return nil, nil + } + + secret := &corev1.Secret{} + if err := cl.Get(ctx, client.ObjectKey{Namespace: ing.Namespace, Name: ing.Spec.TLS[0].SecretName}, secret); err != nil { + return nil, fmt.Errorf("getting TLS Secret %s/%s: %w", ing.Namespace, ing.Spec.TLS[0].SecretName, err) + } + if len(secret.Data[corev1.TLSCertKey]) == 0 || len(secret.Data[corev1.TLSPrivateKeyKey]) == 0 { + return nil, fmt.Errorf("TLS Secret %s/%s must contain tls.crt and tls.key data", 
ing.Namespace, ing.Spec.TLS[0].SecretName)
+	}
+
+	return &ingressCustomTLS{
+		host:       host,
+		secretName: ing.Spec.TLS[0].SecretName,
+		secret:     secret,
+	}, nil
+}
+
+func ingressTLSHost(ing *networkingv1.Ingress) string {
+	if len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 {
+		return ing.Spec.TLS[0].Hosts[0]
+	}
+	return ""
+}
+
+func ingressHTTPSHost(ing *networkingv1.Ingress, defaultHost string) string {
+	if host := ingressTLSHost(ing); host != "" {
+		return host
+	}
+	return defaultHost
+}
+
+func hasTLSSecretData(ctx context.Context, cl client.Client, ns, name string) (bool, error) {
+	secret := &corev1.Secret{}
+	err := cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: name}, secret)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return len(secret.Data[corev1.TLSCertKey]) > 0 && len(secret.Data[corev1.TLSPrivateKeyKey]) > 0, nil
+}
+
+func ingressHTTPSHosts(defaultHost string, customTLS *ingressCustomTLS) []string {
+	hosts := []string{defaultHost}
+	if customTLS != nil && customTLS.host != defaultHost {
+		hosts = append([]string{customTLS.host}, hosts...) 
+	}
+	return hosts
+}
+
+func copyCustomTLSSecretData(data *map[string][]byte, customTLS *ingressCustomTLS) {
+	if customTLS == nil {
+		return
+	}
+	mak.Set(data, customTLS.host+".crt", append([]byte(nil), customTLS.secret.Data[corev1.TLSCertKey]...))
+	mak.Set(data, customTLS.host+".key", append([]byte(nil), customTLS.secret.Data[corev1.TLSPrivateKeyKey]...))
+}
+
+func ensureCustomTLSStateSecrets(ctx context.Context, cl client.Client, namespace string, pg *tsapi.ProxyGroup, customTLS *ingressCustomTLS) error {
+	if customTLS == nil {
+		return nil
+	}
+	secrets := &corev1.SecretList{}
+	if err := cl.List(ctx, secrets, client.InNamespace(namespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil {
+		return fmt.Errorf("listing ProxyGroup state Secrets for %q: %w", pg.Name, err)
+	}
+	for i := range secrets.Items {
+		secret := &secrets.Items[i]
+		orig := secret.DeepCopy()
+		copyCustomTLSSecretData(&secret.Data, customTLS)
+		if err := cl.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
+			return fmt.Errorf("updating ProxyGroup state Secret %s/%s: %w", namespace, secret.Name, err)
+		}
+	}
+	return nil
+}
+
+func indexTLSSecretName(o client.Object) []string {
+	ing, ok := o.(*networkingv1.Ingress)
+	if !ok || len(ing.Spec.TLS) == 0 {
+		return nil
+	}
+	name := strings.TrimSpace(ing.Spec.TLS[0].SecretName)
+	if name == "" {
+		return nil
+	}
+	return []string{name}
+}
+
+func ingressesFromTLSSecret(cl client.Client, logger clientLogger, ingressClassName string, requireProxyGroup bool) handler.MapFunc {
+	return func(ctx context.Context, o client.Object) []reconcile.Request {
+		secret, ok := o.(*corev1.Secret)
+		if !ok {
+			logger.Infof("[unexpected] TLS Secret handler triggered for a non-Secret object")
+			return nil
+		}
+
+		ingList := &networkingv1.IngressList{}
+		if err := cl.List(ctx, ingList, client.InNamespace(secret.Namespace), client.MatchingFields{indexIngressTLSSecret: secret.Name}); err != nil {
+			
logger.Infof("error listing Ingresses for TLS Secret %s/%s: %v", secret.Namespace, secret.Name, err) + return nil + } + + requests := make([]reconcile.Request, 0, len(ingList.Items)) + for _, ing := range ingList.Items { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { + continue + } + hasProxyGroup := ing.Annotations[AnnotationProxyGroup] != "" + if hasProxyGroup != requireProxyGroup { + continue + } + requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + } + return requests + } +} + +func markManagedTLSSecretLabels(labels map[string]string, parent client.Object) map[string]string { + out := make(map[string]string, len(labels)+3) + for key, value := range labels { + out[key] = value + } + mkParentLabels(&out, parent) + return out +} + +func mkParentLabels(labels *map[string]string, parent client.Object) { + mak.Set(labels, LabelParentType, strings.ToLower(parent.GetObjectKind().GroupVersionKind().Kind)) + mak.Set(labels, LabelParentName, parent.GetName()) + if ns := parent.GetNamespace(); ns != "" { + mak.Set(labels, LabelParentNamespace, ns) + } +} + +type clientLogger interface { + Infof(string, ...any) +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index d353c53337fd6..e618fc86c60d7 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -100,6 +100,7 @@ func main() { isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ingressClassName = defaultEnv("OPERATOR_INGRESS_CLASS_NAME", "tailscale") + secretNamespaces = splitNamespaces(defaultEnv("OPERATOR_SECRET_NAMESPACES", "")) ) var opts []kzap.Opts @@ -160,6 +161,7 @@ func main() { tsServer: s, tsClient: tsc, tailscaleNamespace: tsNamespace, + secretNamespaces: secretNamespaces, restConfig: restConfig, proxyImage: image, k8sProxyImage: k8sProxyImage, @@ -290,8 +292,8 @@ func 
serviceManagedResourceFilterPredicate() predicate.Predicate { // ServiceReconciler. It blocks forever. func runReconcilers(opts reconcilerOpts) { startlog := opts.log.Named("startReconcilers") - // For secrets and statefulsets, we only get permission to touch the objects - // in the controller's own namespace. This cannot be expressed by + // For most namespaced resources, we only get permission to touch the + // objects in the controller's own namespace. This cannot be expressed by // .Watches(...) below, instead you have to add a per-type field selector to // the cache that sits a few layers below the builder stuff, which will // implicitly filter what parts of the world the builder code gets to see at @@ -299,6 +301,10 @@ func runReconcilers(opts reconcilerOpts) { nsFilter := cache.ByObject{ Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(), } + secretNamespaces := watchedSecretNamespaces(opts.tailscaleNamespace, opts.secretNamespaces) + secretFilter := cache.ByObject{ + Namespaces: secretNamespaces, + } // We watch the ServiceMonitor CRD to ensure that reconcilers are re-triggered if user's workflows result in the // ServiceMonitor CRD applied after some of our resources that define ServiceMonitor creation. This selector @@ -316,7 +322,7 @@ func runReconcilers(opts reconcilerOpts) { // Other object types (e.g., EndpointSlices) can still be fetched or watched using the cached client, but they will not have any filtering applied. Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ - &corev1.Secret{}: nsFilter, + &corev1.Secret{}: secretFilter, &corev1.ServiceAccount{}: nsFilter, &corev1.Pod{}: nsFilter, &corev1.ConfigMap{}: nsFilter, @@ -425,6 +431,7 @@ func runReconcilers(opts reconcilerOpts) { Named("ingress-reconciler"). Watches(&appsv1.StatefulSet{}, ingressChildFilter). Watches(&corev1.Secret{}, ingressChildFilter). 
+ Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(ingressesFromTLSSecret(mgr.GetClient(), startlog, opts.ingressClassName, false))). Watches(&corev1.Service{}, svcHandlerForIngress). Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress). Complete(&IngressReconciler{ @@ -441,6 +448,9 @@ func runReconcilers(opts reconcilerOpts) { if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyClass, indexProxyClass); err != nil { startlog.Fatalf("failed setting up ProxyClass indexer for Ingresses: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressTLSSecret, indexTLSSecretName); err != nil { + startlog.Fatalf("failed setting up TLS Secret indexer for Ingresses: %v", err) + } lc, err := opts.tsServer.LocalClient() if err != nil { @@ -457,6 +467,7 @@ func runReconcilers(opts reconcilerOpts) { Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog, opts.ingressClassName))). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(ingressesFromTLSSecret(mgr.GetClient(), startlog, opts.ingressClassName, true))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
Complete(&HAIngressReconciler{ recorder: eventRecorder, @@ -760,11 +771,39 @@ func runReconcilers(opts reconcilerOpts) { } } +func splitNamespaces(raw string) []string { + if raw == "" { + return nil + } + parts := strings.Split(raw, ",") + namespaces := make([]string, 0, len(parts)) + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + namespaces = append(namespaces, part) + } + return namespaces +} + +func watchedSecretNamespaces(operatorNamespace string, extraNamespaces []string) map[string]cache.Config { + namespaces := map[string]cache.Config{operatorNamespace: {}} + for _, ns := range extraNamespaces { + if ns == "" { + continue + } + namespaces[ns] = cache.Config{} + } + return namespaces +} + type reconcilerOpts struct { log *zap.SugaredLogger tsServer *tsnet.Server tsClient tsClient tailscaleNamespace string // namespace in which operator resources will be deployed + secretNamespaces []string // extra namespaces whose Secrets should be cached by the operator restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : k8sProxyImage string // : diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 305b1738cbf81..1f0100d2e0411 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -34,6 +35,29 @@ import ( "tailscale.com/util/mak" ) +func TestWatchedSecretNamespaces(t *testing.T) { + t.Run("operator namespace only by default", func(t *testing.T) { + got := watchedSecretNamespaces("tailscale", nil) + want := map[string]cache.Config{"tailscale": {}} + if diff := cmp.Diff(want, got); diff != "" { + 
t.Fatalf("watchedSecretNamespaces mismatch (-want +got):\n%s", diff) + } + }) + + t.Run("adds trimmed extra namespaces and de-dupes", func(t *testing.T) { + extra := splitNamespaces(" zergrush-system , staging-zergrush-system, tailscale ,, zergrush-system ") + got := watchedSecretNamespaces("tailscale", extra) + want := map[string]cache.Config{ + "tailscale": {}, + "zergrush-system": {}, + "staging-zergrush-system": {}, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("watchedSecretNamespaces mismatch (-want +got):\n%s", diff) + } + }) +} + func TestLoadBalancerClass(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 519f81fe0db29..c3a72d652452f 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -69,8 +69,9 @@ const ( AnnotationProxyGroup = "tailscale.com/proxy-group" // Annotations settable by users on ingresses. - AnnotationFunnel = "tailscale.com/funnel" - AnnotationHTTPRedirect = "tailscale.com/http-redirect" + AnnotationFunnel = "tailscale.com/funnel" + AnnotationHTTPRedirect = "tailscale.com/http-redirect" + AnnotationAcceptAppCaps = "tailscale.com/accept-app-caps" // If set to true, set up iptables/nftables rules in the proxy forward // cluster traffic to the tailnet IP of that proxy. This can only be set @@ -154,6 +155,12 @@ type tailscaleSTSConfig struct { // ordinal number generated by the StatefulSet. HostnamePrefix string + // CustomTLSCerts are copied into the proxy state Secret as + // .crt/.key entries so the proxy can terminate TLS for + // externally managed custom hostnames while still using Tailscale-managed + // certificates for MagicDNS endpoints. + CustomTLSCerts map[string]*corev1.Secret + // Tailnet specifies the Tailnet resource to use for producing auth keys. 
Tailnet string } @@ -480,6 +487,10 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale mak.Set(&secret.StringData, "serve-config", string(j)) } + for host, src := range stsC.CustomTLSCerts { + mak.Set(&secret.Data, host+".crt", append([]byte(nil), src.Data[corev1.TLSCertKey]...)) + mak.Set(&secret.Data, host+".key", append([]byte(nil), src.Data[corev1.TLSPrivateKeyKey]...)) + } if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") @@ -502,6 +513,9 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale } for _, secret := range secrets.Items { + if secret.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeCerts { + continue + } var ordinal int32 if _, err := fmt.Sscanf(secret.Name, hsvc.Name+"-%d", &ordinal); err != nil { return nil, err @@ -722,7 +736,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Value: "true", }, ) - if sts.ForwardClusterTrafficViaL7IngressProxy { container.Env = append(container.Env, corev1.EnvVar{ Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 36b608ef6f4fd..c36d353889c75 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -72,6 +72,7 @@ type configOpts struct { secretExtraData map[string][]byte resourceVersion string replicas *int32 + certShareMode string enableMetrics bool serviceMonitorLabels tsapi.Labels } @@ -180,6 +181,9 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }) tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}) } + if opts.certShareMode != "" { + tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{Name: 
"TS_CERT_SHARE_MODE", Value: opts.certShareMode}) + } tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_INTERNAL_APP", Value: opts.app, @@ -279,21 +283,28 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps if err != nil { t.Fatal(err) } + envs := []corev1.EnvVar{ + {Name: "TS_USERSPACE", Value: "true"}, + {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, + } + if opts.certShareMode != "" { + envs = append(envs, corev1.EnvVar{Name: "TS_CERT_SHARE_MODE", Value: opts.certShareMode}) + } + envs = append(envs, + corev1.EnvVar{Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, + corev1.EnvVar{Name: "TS_INTERNAL_APP", Value: opts.app}, + ) + tsContainer := corev1.Container{ - Name: "tailscale", - Image: "tailscale/tailscale", - Env: []corev1.EnvVar{ - {Name: "TS_USERSPACE", Value: "true"}, - {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: 
"metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, - {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, - {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, - {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, - {Name: "TS_INTERNAL_APP", Value: opts.app}, - }, + Name: "tailscale", + Image: "tailscale/tailscale", + Env: envs, ImagePullPolicy: "Always", VolumeMounts: []corev1.VolumeMount{ {Name: "tailscaledconfig-0", ReadOnly: true, MountPath: path.Join("/etc/tsconfig", opts.secretName)}, diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index efab9db7aad6e..a823d798043ee 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -125,13 +125,22 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if !validLookingCertDomain(domain) { return nil, errors.New("invalid domain") } + now := b.clock.Now() + cs, err := b.getCertStore() + if err != nil { + return nil, err + } certDomain, err := b.resolveCertDomain(domain) if err != nil { + if pair, cacheErr := getCertPEMCached(cs, domain, now); cacheErr == nil { + return pair, nil + } else if cacheErr != nil && !errors.Is(cacheErr, ipn.ErrStateNotExist) { + return nil, cacheErr + } return nil, err } logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain)) - now := b.clock.Now() traceACME := func(v any) { if !acmeDebug() { return @@ -140,11 +149,6 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string log.Printf("acme %T: %s", v, j) } - cs, err := b.getCertStore() - if err != nil { - return nil, err - } - if pair, err := 
getCertPEMCached(cs, certDomain, now); err == nil { if envknob.IsCertShareReadOnlyMode() { return pair, nil diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index cc9146ae1e055..06d4f4b9be4e6 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -581,3 +581,61 @@ func TestGetCertPEMWithValidity(t *testing.T) { }) } } + +func TestGetCertPEMWithValidityUsesCachedCustomDomain(t *testing.T) { + const ( + certDomain = "node.ts.net" + customDomain = "example.com" + ) + b := newTestLocalBackend(t) + b.varRoot = t.TempDir() + b.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2023, time.February, 20, 0, 0, 0, 0, time.UTC)}) + testRoot, err := certTestFS.ReadFile("testdata/rootCA.pem") + if err != nil { + t.Fatal(err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(testRoot) { + t.Fatal("Unable to add test CA to the cert pool") + } + testX509Roots = roots + defer func() { testX509Roots = nil }() + + b.mu.Lock() + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{}).View(), + DNS: tailcfg.DNSConfig{ + CertDomains: []string{certDomain}, + }, + }) + b.mu.Unlock() + + certDir, err := b.certDir() + if err != nil { + t.Fatalf("certDir error: %v", err) + } + if err := os.WriteFile(filepath.Join(certDir, customDomain+".crt"), must.Get(os.ReadFile("testdata/example.com.pem")), 0644); err != nil { + t.Fatalf("writing cached cert: %v", err) + } + if err := os.WriteFile(filepath.Join(certDir, customDomain+".key"), must.Get(os.ReadFile("testdata/example.com-key.pem")), 0644); err != nil { + t.Fatalf("writing cached key: %v", err) + } + + called := false + getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { + called = true + return nil, nil + } + defer func() { getCertPEM = nil }() + + pair, err := b.GetCertPEMWithValidity(context.Background(), customDomain, 0) + 
if err != nil { + t.Fatalf("GetCertPEMWithValidity(%q): %v", customDomain, err) + } + if pair == nil { + t.Fatalf("GetCertPEMWithValidity(%q) returned nil pair", customDomain) + } + if called { + t.Fatalf("GetCertPEMWithValidity(%q) unexpectedly attempted issuance", customDomain) + } +} diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 9460896ad8d4a..d3a60f7ec47ef 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1048,6 +1048,7 @@ func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) { r.Out.Header.Del("Tailscale-User-Login") r.Out.Header.Del("Tailscale-User-Name") r.Out.Header.Del("Tailscale-User-Profile-Pic") + r.Out.Header.Del("Tailscale-Caller-Tags") r.Out.Header.Del("Tailscale-Funnel-Request") r.Out.Header.Del("Tailscale-Headers-Info") @@ -1064,8 +1065,10 @@ func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) { return // traffic from outside of Tailnet (funneled or local machine) } if node.IsTagged() { - // 2023-06-14: Not setting identity headers for tagged nodes. - // Only currently set for nodes with user identities. 
+ tags := strings.Join(node.Tags().AsSlice(), ",") + r.Out.Header.Set("Tailscale-User-Login", encTailscaleHeaderValue(tags)) + r.Out.Header.Set("Tailscale-Caller-Tags", encTailscaleHeaderValue(tags)) + r.Out.Header.Set("Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers") return } r.Out.Header.Set("Tailscale-User-Login", encTailscaleHeaderValue(user.LoginName)) @@ -1297,9 +1300,14 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { + key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) + if cfg, ok := b.serveConfig.FindServiceWeb(forVIPService, key); ok { + return cfg, true + } + magicDNSSuffix := b.currentNode().NetMap().MagicDNSSuffix() fqdn := strings.Join([]string{forVIPService.WithoutPrefix(), magicDNSSuffix}, ".") - key := ipn.HostPort(net.JoinHostPort(fqdn, fmt.Sprintf("%d", port))) + key = ipn.HostPort(net.JoinHostPort(fqdn, fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index b3f48b105c8f7..755a37bfa65d4 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -529,6 +529,64 @@ func TestServeConfigServices(t *testing.T) { } } +func TestServeConfigServicesCustomHost(t *testing.T) { + b := newTestBackend(t) + svcIPMapJSON, err := json.Marshal(tailcfg.ServiceIPMappings{ + "svc:foo": {netip.MustParseAddr("100.101.101.101")}, + }) + if err != nil { + t.Fatal(err) + } + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{tailcfg.RawMessage(svcIPMapJSON)}, + }, + }).View(), + }) + + conf := &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: 
true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.example.com:443": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "ok"}, + }, + }, + }, + }, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + req := &http.Request{ + Host: "foo.example.com", + URL: &url.URL{Path: "/"}, + TLS: &tls.ConnectionState{ServerName: "foo.example.com"}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + ForVIPService: "svc:foo", + DestPort: 443, + SrcAddr: netip.MustParseAddrPort("100.64.0.1:12345"), + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("got status %d, want %d", w.Code, http.StatusOK) + } + if body := strings.TrimSpace(w.Body.String()); body != "ok" { + t.Fatalf("got body %q, want %q", body, "ok") + } +} + func TestServeConfigETag(t *testing.T) { b := newTestBackend(t) @@ -756,10 +814,11 @@ func TestServeHTTPProxyHeaders(t *testing.T) { wantHeaders: []headerCheck{ {"X-Forwarded-Proto", "https"}, {"X-Forwarded-For", "100.150.151.153"}, - {"Tailscale-User-Login", ""}, + {"Tailscale-User-Login", "tag:server,tag:test"}, {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, - {"Tailscale-Headers-Info", ""}, + {"Tailscale-Caller-Tags", "tag:server,tag:test"}, + {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, }, }, { @@ -771,6 +830,7 @@ func TestServeHTTPProxyHeaders(t *testing.T) { {"Tailscale-User-Login", ""}, {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Caller-Tags", ""}, {"Tailscale-Headers-Info", ""}, }, }, @@ -897,10 +957,11 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { wantHeaders: []headerCheck{ {"X-Forwarded-Proto", "https"}, {"X-Forwarded-For", "100.150.151.153"}, - {"Tailscale-User-Login", ""}, + {"Tailscale-User-Login", "tag:server,tag:test"}, {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, - {"Tailscale-Headers-Info", ""}, + 
{"Tailscale-Caller-Tags", "tag:server,tag:test"}, + {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, {"Tailscale-App-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, }, }, @@ -913,6 +974,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Login", ""}, {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Caller-Tags", ""}, {"Tailscale-Headers-Info", ""}, {"Tailscale-App-Capabilities", ""}, }, @@ -951,6 +1013,126 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { } } +func TestServeHTTPProxyGrantHeaderForVIPService(t *testing.T) { + b := newTestBackend(t) + + nm := b.NetMap() + matches, err := filter.MatchesFromFilterRules([]tailcfg.FilterRule{ + { + SrcIPs: []string{"100.150.151.152"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/interesting": []tailcfg.RawMessage{ + `{"role": "🐿"}`, + }, + }, + }}, + }, + { + SrcIPs: []string{"100.150.151.153"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/boring": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + "example.com/cap/irrelevant": []tailcfg.RawMessage{ + `{"role": "Editor"}`, + }, + }, + }}, + }, + }) + if err != nil { + t.Fatal(err) + } + nm.PacketFilter = matches + b.SetControlClientStatus(nil, controlclient.Status{NetMap: nm}) + + testServ := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for key, val := range r.Header { + w.Header().Add(key, strings.Join(val, ",")) + } + })) + defer testServ.Close() + + conf := &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.example.com:443": {Handlers: 
map[string]*ipn.HTTPHandler{ + "/": { + Proxy: testServ.URL, + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + }, + }}, + }, + }, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + srcIP string + wantCap string + }{ + { + name: "request-from-user-within-tailnet", + srcIP: "100.150.151.152", + wantCap: `{"example.com/cap/interesting":[{"role":"🐿"}]}`, + }, + { + name: "request-from-tagged-node-within-tailnet", + srcIP: "100.150.151.153", + wantCap: `{"example.com/cap/boring":[{"role":"Viewer"}]}`, + }, + { + name: "request-from-outside-tailnet", + srcIP: "100.160.161.162", + wantCap: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &http.Request{ + Host: "foo.example.com", + URL: &url.URL{Path: "/"}, + TLS: &tls.ConnectionState{ServerName: "foo.example.com"}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + ForVIPService: "svc:foo", + DestPort: 443, + SrcAddr: netip.MustParseAddrPort(tt.srcIP + ":1234"), + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + dec := new(mime.WordDecoder) + maybeEncoded := w.Result().Header.Get("Tailscale-App-Capabilities") + got, err := dec.DecodeHeader(maybeEncoded) + if err != nil { + t.Fatalf("invalid %q header; failed to decode: %v", maybeEncoded, err) + } + if got != tt.wantCap { + t.Errorf("invalid %q header; want=%q, got=%q", "Tailscale-App-Capabilities", tt.wantCap, got) + } + }) + } +} + func Test_reverseProxyConfiguration(t *testing.T) { b := newTestBackend(t) type test struct { diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index f7d1b90cd1e2c..fd1c17e3c8c8e 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -215,33 +215,22 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { secret, err := 
s.client.GetSecret(ctx, domain) if err != nil { if kubeclient.IsNotFoundErr(err) { - // TODO(irbekrm): we should return a more specific error - // that wraps ipn.ErrStateNotExist here. - return nil, nil, ipn.ErrStateNotExist + return s.readTLSCertAndKeyFromStateSecret(ctx, certKey, keyKey) } st, ok := err.(*kubeapi.Status) if ok && st.Code == http.StatusForbidden && (s.certShareMode == "ro" || s.certShareMode == "rw") { - // In cert share mode, we read from a dedicated Secret per domain. - // To get here, we already had a cache miss from our in-memory - // store. For write replicas, that means it wasn't available on - // start and it wasn't written since. For read replicas, that means - // it wasn't available on start and it hasn't been reloaded in the - // background. So getting a "forbidden" error is an expected - // "not found" case where we've been asked for a cert we don't - // expect to issue, and so the forbidden error reflects that the - // operator didn't assign permission for a Secret for that domain. - // - // This code path gets triggered by the admin UI's machine page, - // which queries for the node's own TLS cert existing via the - // "tls-cert-status" c2n API. - return nil, nil, ipn.ErrStateNotExist + // In cert share mode, we normally read from a dedicated Secret per + // domain. However, externally managed custom TLS certs for HA + // ingress proxies may exist only in the pod's state Secret. Fall + // back to the state Secret before treating this as a cache miss. + return s.readTLSCertAndKeyFromStateSecret(ctx, certKey, keyKey) } return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) } cert = secret.Data[keyTLSCert] key = secret.Data[keyTLSKey] if len(cert) == 0 || len(key) == 0 { - return nil, nil, ipn.ErrStateNotExist + return s.readTLSCertAndKeyFromStateSecret(ctx, certKey, keyKey) } // TODO(irbekrm): a read between these two separate writes would // get a mismatched cert and key. 
Allow writing both cert and @@ -260,6 +249,22 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { return cert, key, nil } +func (s *Store) readTLSCertAndKeyFromStateSecret(ctx context.Context, certKey, keyKey string) ([]byte, []byte, error) { + stateSecret, err := s.client.GetSecret(ctx, s.secretName) + if err != nil { + if kubeclient.IsNotFoundErr(err) { + return nil, nil, ipn.ErrStateNotExist + } + return nil, nil, fmt.Errorf("getting TLS state Secret %q: %w", s.secretName, err) + } + cert := stateSecret.Data[sanitizeKey(certKey)] + key := stateSecret.Data[sanitizeKey(keyKey)] + if len(cert) == 0 || len(key) == 0 { + return nil, nil, ipn.ErrStateNotExist + } + return cert, key, nil +} + func (s *Store) updateSecret(data map[string][]byte, secretName string) (err error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer func() { diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 1e6f711d686e2..c1b6e6440c1ae 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -431,15 +431,17 @@ func TestReadTLSCertAndKey(t *testing.T) { ) tests := []struct { - name string - memoryStore map[ipn.StateKey][]byte // pre-existing memory store state - certShareMode string - domain string - secretData map[string][]byte // data to return from mock GetSecret - secretGetErr error // error to return from mock GetSecret - wantCert []byte - wantKey []byte - wantErr error + name string + memoryStore map[ipn.StateKey][]byte // pre-existing memory store state + certShareMode string + domain string + secretData map[string][]byte // data to return from mock GetSecret + secretGetErr error // error to return from mock GetSecret + secretDataByName map[string]map[string][]byte + secretGetErrByName map[string]error + wantCert []byte + wantKey []byte + wantErr error // what should end up in memory store after the store is created wantMemoryStore 
map[ipn.StateKey][]byte }{ @@ -488,6 +490,38 @@ func TestReadTLSCertAndKey(t *testing.T) { wantCert: []byte(testCert), wantKey: []byte(testKey), }, + { + name: "cert_share_ro_mode_fallback_to_state_secret", + certShareMode: "ro", + domain: testDomain, + secretDataByName: map[string]map[string][]byte{ + "ts-state": { + testDomain + ".crt": []byte(testCert), + testDomain + ".key": []byte(testKey), + }, + }, + secretGetErrByName: map[string]error{ + testDomain: &kubeapi.Status{Code: 404}, + }, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + }, + { + name: "cert_share_rw_mode_fallback_to_state_secret", + certShareMode: "rw", + domain: testDomain, + secretDataByName: map[string]map[string][]byte{ + "ts-state": { + testDomain + ".crt": []byte(testCert), + testDomain + ".key": []byte(testKey), + }, + }, + secretGetErrByName: map[string]error{ + testDomain: &kubeapi.Status{Code: 404}, + }, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + }, { name: "cert_share_ro_mode_found_in_memory", certShareMode: "ro", @@ -514,8 +548,11 @@ func TestReadTLSCertAndKey(t *testing.T) { name: "cert_share_ro_mode_forbidden", certShareMode: "ro", domain: testDomain, - secretGetErr: &kubeapi.Status{Code: 403}, - wantErr: ipn.ErrStateNotExist, + secretGetErrByName: map[string]error{ + testDomain: &kubeapi.Status{Code: 403}, + "ts-state": &kubeapi.Status{Code: 404}, + }, + wantErr: ipn.ErrStateNotExist, }, { name: "cert_share_ro_mode_empty_cert_in_secret", @@ -541,6 +578,16 @@ func TestReadTLSCertAndKey(t *testing.T) { client := &kubeclient.FakeClient{ GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if tt.secretGetErrByName != nil { + if err, ok := tt.secretGetErrByName[name]; ok { + return nil, err + } + } + if tt.secretDataByName != nil { + if data, ok := tt.secretDataByName[name]; ok { + return &kubeapi.Secret{Data: data}, nil + } + } if tt.secretGetErr != nil { return nil, tt.secretGetErr } diff --git a/kube/certs/certs.go 
b/kube/certs/certs.go index 4c8ac88b6b624..297484f170f75 100644 --- a/kube/certs/certs.go +++ b/kube/certs/certs.go @@ -71,6 +71,17 @@ func (cm *CertManager) EnsureCertLoops(ctx context.Context, sc *ipn.ServeConfig) } } } + if len(currentDomains) > 0 { + certDomains, err := cm.certDomains(ctx) + if err != nil { + return fmt.Errorf("error getting cert domains: %w", err) + } + for domain := range currentDomains { + if !certDomains[domain] { + delete(currentDomains, domain) + } + } + } cm.mu.Lock() defer cm.mu.Unlock() for domain := range currentDomains { @@ -94,6 +105,29 @@ func (cm *CertManager) EnsureCertLoops(ctx context.Context, sc *ipn.ServeConfig) return nil } +func (cm *CertManager) certDomains(ctx context.Context) (map[string]bool, error) { + w, err := cm.lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return nil, fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + for { + n, err := w.Next() + if err != nil { + return nil, err + } + if n.NetMap == nil { + continue + } + certDomains := make(map[string]bool, len(n.NetMap.DNS.CertDomains)) + for _, domain := range n.NetMap.DNS.CertDomains { + certDomains[domain] = true + } + return certDomains, nil + } +} + // runCertLoop: // - calls localAPI certificate endpoint to ensure that certs are issued for the // given domain name diff --git a/kube/certs/certs_test.go b/kube/certs/certs_test.go index f3662f6c39ad4..3bdd85430f15d 100644 --- a/kube/certs/certs_test.go +++ b/kube/certs/certs_test.go @@ -99,6 +99,20 @@ func TestEnsureCertLoops(t *testing.T) { }, initialGoroutines: 1, // only one loop for the 443 endpoint }, + { + name: "ignore_custom_tls_domains", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + "my-app.example.com:443": {}, + }, + }, + }, + }, + initialGoroutines: 1, + }, { name: "remove_domain", initialConfig: 
&ipn.ServeConfig{