cmd/k8s-operator,k8s-operator,kube/kubetypes: add an option to configure app connector via Connector spec (#13950)

* cmd/k8s-operator,k8s-operator,kube/kubetypes: add an option to configure app connector via Connector spec

Updates tailscale/tailscale#11113

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
Author: Irbe Krumina
Date: 2024-11-11 11:43:54 +00:00 (committed by GitHub)
Parent: 6ff85846bc
Commit: b9ecc50ce3
11 changed files with 381 additions and 46 deletions
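For orientation, here is a minimal sketch of a Connector configured as an app connector, written as a Go struct literal in the same style as the tests added below (the name, UID and route are illustrative only):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

func main() {
	// A Connector that the new validation accepts: appConnector is set,
	// while subnetRouter and exitNode are left unset (they are mutually
	// exclusive with appConnector).
	cn := &tsapi.Connector{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example",                // illustrative
			UID:  types.UID("example-uid"), // illustrative
		},
		Spec: tsapi.ConnectorSpec{
			AppConnector: &tsapi.AppConnector{
				// Optional preconfigured routes; if empty, the app
				// connector discovers routes dynamically.
				Routes: tsapi.Routes{tsapi.Route("10.88.2.21/32")},
			},
		},
	}
	fmt.Println(cn.Name)
}
```

The equivalent YAML manifest would set spec.appConnector.routes, matching the CRD changes further down in this diff.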

View File

@@ -13,7 +13,8 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"errors"
"go.uber.org/zap"
xslices "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
@@ -58,6 +59,7 @@ type ConnectorReconciler struct {
subnetRouters set.Slice[types.UID] // for subnet routers gauge
exitNodes set.Slice[types.UID] // for exit nodes gauge
appConnectors set.Slice[types.UID] // for app connectors gauge
}
var (
@@ -67,6 +69,8 @@ var (
gaugeConnectorSubnetRouterResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithSubnetRouterCount)
// gaugeConnectorExitNodeResources tracks the number of Connectors currently managed by this operator instance that are exit nodes.
gaugeConnectorExitNodeResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithExitNodeCount)
// gaugeConnectorAppConnectorResources tracks the number of Connectors currently managed by this operator instance that are app connectors.
gaugeConnectorAppConnectorResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithAppConnectorCount)
)
func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
@@ -108,13 +112,12 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
oldCnStatus := cn.Status.DeepCopy()
setStatus := func(cn *tsapi.Connector, _ tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetConnectorCondition(cn, tsapi.ConnectorReady, status, reason, message, cn.Generation, a.clock, logger)
var updateErr error
if !apiequality.Semantic.DeepEqual(oldCnStatus, cn.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := a.Client.Status().Update(ctx, cn); updateErr != nil {
err = errors.Wrap(err, updateErr.Error())
}
updateErr = a.Client.Status().Update(ctx, cn)
}
return res, err
return res, errors.Join(err, updateErr)
}
if !slices.Contains(cn.Finalizers, FinalizerName) {
@@ -150,6 +153,9 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated)
}
if cn.Spec.AppConnector != nil {
cn.Status.IsAppConnector = true
}
cn.Status.SubnetRoutes = ""
return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated)
}
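One side effect of the hunk above is that the operator moves off github.com/pkg/errors: the status-update closure now combines the reconcile error and the status-update error with the standard library's errors.Join, which drops nil operands. A standalone illustration (not the operator's code):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	reconcileErr := errors.New("provision failed") // hypothetical reconcile error
	var updateErr error                            // nil when the status update succeeds

	// A nil operand is dropped, so a successful status update does not
	// mask or alter the original reconcile error.
	fmt.Println(errors.Join(reconcileErr, updateErr)) // provision failed
	// Joining only nils yields nil, i.e. no error at all.
	fmt.Println(errors.Join(nil, nil)) // <nil>
}
```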
@@ -189,23 +195,37 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
}
if cn.Spec.AppConnector != nil {
sts.Connector.isAppConnector = true
if len(cn.Spec.AppConnector.Routes) != 0 {
sts.Connector.routes = cn.Spec.AppConnector.Routes.Stringify()
}
}
a.mu.Lock()
if sts.Connector.isExitNode {
if cn.Spec.ExitNode {
a.exitNodes.Add(cn.UID)
} else {
a.exitNodes.Remove(cn.UID)
}
if sts.Connector.routes != "" {
if cn.Spec.SubnetRouter != nil {
a.subnetRouters.Add(cn.GetUID())
} else {
a.subnetRouters.Remove(cn.GetUID())
}
if cn.Spec.AppConnector != nil {
a.appConnectors.Add(cn.GetUID())
} else {
a.appConnectors.Remove(cn.GetUID())
}
a.mu.Unlock()
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len()))
var connectors set.Slice[types.UID]
connectors.AddSlice(a.exitNodes.Slice())
connectors.AddSlice(a.subnetRouters.Slice())
connectors.AddSlice(a.appConnectors.Slice())
gaugeConnectorResources.Set(int64(connectors.Len()))
_, err := a.ssr.Provision(ctx, logger, sts)
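The bookkeeping above feeds both the per-kind gauges and an overall Connector gauge built from the union of the three UID sets, so a Connector that is both an exit node and a subnet router is still counted once. A rough standalone sketch of that union, assuming the set.Slice type comes from tailscale.com/util/set (the import path is an assumption; the methods mirror those used above):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"tailscale.com/util/set" // assumed import path for set.Slice
)

func main() {
	var exitNodes, subnetRouters, appConnectors set.Slice[types.UID]

	// One Connector acting as both exit node and subnet router, and a
	// second Connector acting as an app connector.
	exitNodes.Add(types.UID("uid-1"))
	subnetRouters.Add(types.UID("uid-1"))
	appConnectors.Add(types.UID("uid-2"))

	// The overall gauge is set from the deduplicated union.
	var connectors set.Slice[types.UID]
	connectors.AddSlice(exitNodes.Slice())
	connectors.AddSlice(subnetRouters.Slice())
	connectors.AddSlice(appConnectors.Slice())
	fmt.Println(connectors.Len()) // 2, not 3
}
```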
@@ -248,12 +268,15 @@ func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger
a.mu.Lock()
a.subnetRouters.Remove(cn.UID)
a.exitNodes.Remove(cn.UID)
a.appConnectors.Remove(cn.UID)
a.mu.Unlock()
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len()))
var connectors set.Slice[types.UID]
connectors.AddSlice(a.exitNodes.Slice())
connectors.AddSlice(a.subnetRouters.Slice())
connectors.AddSlice(a.appConnectors.Slice())
gaugeConnectorResources.Set(int64(connectors.Len()))
return true, nil
}
@@ -262,8 +285,14 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error {
// Connector fields are already validated at apply time with CEL validation
// on custom resource fields. The checks here are a backup in case the
// CEL validation breaks without us noticing.
if !(cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) {
return errors.New("invalid spec: a Connector must expose subnet routes or act as an exit node (or both)")
if cn.Spec.SubnetRouter == nil && !cn.Spec.ExitNode && cn.Spec.AppConnector == nil {
return errors.New("invalid spec: a Connector must be configured as at least one of subnet router, exit node or app connector")
}
if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil {
return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node")
}
if cn.Spec.AppConnector != nil {
return validateAppConnector(cn.Spec.AppConnector)
}
if cn.Spec.SubnetRouter == nil {
return nil
@@ -272,19 +301,27 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error {
}
func validateSubnetRouter(sb *tsapi.SubnetRouter) error {
if len(sb.AdvertiseRoutes) < 1 {
if len(sb.AdvertiseRoutes) == 0 {
return errors.New("invalid subnet router spec: no routes defined")
}
var err error
for _, route := range sb.AdvertiseRoutes {
return validateRoutes(sb.AdvertiseRoutes)
}
func validateAppConnector(ac *tsapi.AppConnector) error {
return validateRoutes(ac.Routes)
}
func validateRoutes(routes tsapi.Routes) error {
var errs []error
for _, route := range routes {
pfx, e := netip.ParsePrefix(string(route))
if e != nil {
err = errors.Wrap(err, fmt.Sprintf("route %s is invalid: %v", route, err))
errs = append(errs, fmt.Errorf("route %v is invalid: %v", route, e))
continue
}
if pfx.Masked() != pfx {
err = errors.Wrap(err, fmt.Sprintf("route %s has non-address bits set; expected %s", pfx, pfx.Masked()))
errs = append(errs, fmt.Errorf("route %s has non-address bits set; expected %s", pfx, pfx.Masked()))
}
}
return err
return errors.Join(errs...)
}
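The rewritten validation collects every per-route problem into a slice and returns errors.Join(errs...), instead of the old errors.Wrap chain (which also referenced the wrong error variable). A self-contained sketch of the same checks over plain strings, runnable outside the operator:

```go
package main

import (
	"errors"
	"fmt"
	"net/netip"
)

// validateRoutes mirrors the checks above: every route must parse as a
// CIDR prefix and must not have bits set beyond its prefix length.
func validateRoutes(routes []string) error {
	var errs []error
	for _, route := range routes {
		pfx, err := netip.ParsePrefix(route)
		if err != nil {
			errs = append(errs, fmt.Errorf("route %v is invalid: %v", route, err))
			continue
		}
		if pfx.Masked() != pfx {
			errs = append(errs, fmt.Errorf("route %s has non-address bits set; expected %s", pfx, pfx.Masked()))
		}
	}
	return errors.Join(errs...)
}

func main() {
	// "1.2.3.4/5" is rejected because its masked form is 0.0.0.0/5,
	// matching the invalid-route case exercised in the test below.
	fmt.Println(validateRoutes([]string{"1.2.3.4/5", "10.88.2.21/32"}))
}
```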

View File

@@ -8,12 +8,14 @@ package main
import (
"context"
"testing"
"time"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
@@ -296,3 +298,100 @@ func TestConnectorWithProxyClass(t *testing.T) {
expectReconciled(t, cr, "", "test")
expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation)
}
func TestConnectorWithAppConnector(t *testing.T) {
// Setup
cn := &tsapi.Connector{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
UID: types.UID("1234-UID"),
},
TypeMeta: metav1.TypeMeta{
Kind: tsapi.ConnectorKind,
APIVersion: "tailscale.io/v1alpha1",
},
Spec: tsapi.ConnectorSpec{
AppConnector: &tsapi.AppConnector{},
},
}
fc := fake.NewClientBuilder().
WithScheme(tsapi.GlobalScheme).
WithObjects(cn).
WithStatusSubresource(cn).
Build()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
cl := tstest.NewClock(tstest.ClockOpts{})
fr := record.NewFakeRecorder(1)
cr := &ConnectorReconciler{
Client: fc,
clock: cl,
ssr: &tailscaleSTSReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
},
logger: zl.Sugar(),
recorder: fr,
}
// 1. Connector with app connector is created and becomes ready
expectReconciled(t, cr, "", "test")
fullName, shortName := findGenName(t, fc, "", "test", "connector")
opts := configOpts{
stsName: shortName,
secretName: fullName,
parentType: "connector",
hostname: "test-connector",
app: kubetypes.AppConnector,
isAppConnector: true,
}
expectEqual(t, fc, expectedSecret(t, fc, opts), nil)
expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation)
// Connector's ready condition should be set to true
cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer")
cn.Status.IsAppConnector = true
cn.Status.Conditions = []metav1.Condition{{
Type: string(tsapi.ConnectorReady),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
Reason: reasonConnectorCreated,
Message: reasonConnectorCreated,
}}
expectEqual(t, fc, cn, nil)
// 2. Connector with invalid app connector routes has status set to invalid
mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) {
conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")}
})
cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")}
expectReconciled(t, cr, "", "test")
cn.Status.Conditions = []metav1.Condition{{
Type: string(tsapi.ConnectorReady),
Status: metav1.ConditionFalse,
LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
Reason: reasonConnectorInvalid,
Message: "Connector is invalid: route 1.2.3.4/5 has non-address bits set; expected 0.0.0.0/5",
}}
expectEqual(t, fc, cn, nil)
// 3. Connector with valid app connector routes becomes ready
mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) {
conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")}
})
cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")}
cn.Status.Conditions = []metav1.Condition{{
Type: string(tsapi.ConnectorReady),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
Reason: reasonConnectorCreated,
Message: reasonConnectorCreated,
}}
expectReconciled(t, cr, "", "test")
}

View File

@@ -24,6 +24,10 @@ spec:
jsonPath: .status.isExitNode
name: IsExitNode
type: string
- description: Whether this Connector instance is an app connector.
jsonPath: .status.isAppConnector
name: IsAppConnector
type: string
- description: Status of the deployed Connector resources.
jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason
name: Status
@@ -66,10 +70,40 @@ spec:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
type: object
properties:
appConnector:
description: |-
AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
Connector does not act as an app connector.
Note that you will need to manually configure the permissions and the domains for the app connector via the
Admin panel.
Note also that the main tested and supported use case of this config option is to deploy an app connector on
Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
tested or optimised for.
If you are using the app connector to access SaaS applications because you need a predictable egress IP that
can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
device with a static IP address.
https://tailscale.com/kb/1281/app-connectors
type: object
properties:
routes:
description: |-
Routes are optional preconfigured routes for the domains routed via the app connector.
If not set, routes for the domains will be discovered dynamically.
If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
also dynamically discover other routes.
https://tailscale.com/kb/1332/apps-best-practices#preconfiguration
type: array
minItems: 1
items:
type: string
format: cidr
exitNode:
description: |-
ExitNode defines whether the Connector node should act as a
Tailscale exit node. Defaults to false.
ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes
type: boolean
hostname:
@@ -90,9 +124,11 @@ spec:
type: string
subnetRouter:
description: |-
SubnetRouter defines subnet routes that the Connector node should
expose to tailnet. If unset, none are exposed.
SubnetRouter defines subnet routes that the Connector device should
expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
If this field is unset, the device does not get configured as a Tailscale subnet router.
This field is mutually exclusive with the appConnector field.
type: object
required:
- advertiseRoutes
@@ -125,8 +161,10 @@ spec:
type: string
pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$
x-kubernetes-validations:
- rule: has(self.subnetRouter) || self.exitNode == true
message: A Connector needs to be either an exit node or a subnet router, or both.
- rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)
message: A Connector needs to have at least one of exit node, subnet router or app connector configured.
- rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))'
message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields.
status:
description: |-
ConnectorStatus describes the status of the Connector. This is set
@@ -200,6 +238,9 @@ spec:
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node.
type: string
isAppConnector:
description: IsAppConnector is set to true if the Connector acts as an app connector.
type: boolean
isExitNode:
description: IsExitNode is set to true if the Connector acts as an exit node.
type: boolean

View File

@@ -53,6 +53,10 @@ spec:
jsonPath: .status.isExitNode
name: IsExitNode
type: string
- description: Whether this Connector instance is an app connector.
jsonPath: .status.isAppConnector
name: IsAppConnector
type: string
- description: Status of the deployed Connector resources.
jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason
name: Status
@@ -91,10 +95,40 @@ spec:
More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
properties:
appConnector:
description: |-
AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
Connector does not act as an app connector.
Note that you will need to manually configure the permissions and the domains for the app connector via the
Admin panel.
Note also that the main tested and supported use case of this config option is to deploy an app connector on
Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
tested or optimised for.
If you are using the app connector to access SaaS applications because you need a predictable egress IP that
can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
device with a static IP address.
https://tailscale.com/kb/1281/app-connectors
properties:
routes:
description: |-
Routes are optional preconfigured routes for the domains routed via the app connector.
If not set, routes for the domains will be discovered dynamically.
If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
also dynamically discover other routes.
https://tailscale.com/kb/1332/apps-best-practices#preconfiguration
items:
format: cidr
type: string
minItems: 1
type: array
type: object
exitNode:
description: |-
ExitNode defines whether the Connector node should act as a
Tailscale exit node. Defaults to false.
ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes
type: boolean
hostname:
@@ -115,9 +149,11 @@ spec:
type: string
subnetRouter:
description: |-
SubnetRouter defines subnet routes that the Connector node should
expose to tailnet. If unset, none are exposed.
SubnetRouter defines subnet routes that the Connector device should
expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
If this field is unset, the device does not get configured as a Tailscale subnet router.
This field is mutually exclusive with the appConnector field.
properties:
advertiseRoutes:
description: |-
@@ -151,8 +187,10 @@ spec:
type: array
type: object
x-kubernetes-validations:
- message: A Connector needs to be either an exit node or a subnet router, or both.
rule: has(self.subnetRouter) || self.exitNode == true
- message: A Connector needs to have at least one of exit node, subnet router or app connector configured.
rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)
- message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields.
rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))'
status:
description: |-
ConnectorStatus describes the status of the Connector. This is set
@@ -225,6 +263,9 @@ spec:
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node.
type: string
isAppConnector:
description: IsAppConnector is set to true if the Connector acts as an app connector.
type: boolean
isExitNode:
description: IsExitNode is set to true if the Connector acts as an exit node.
type: boolean

View File

@@ -1388,7 +1388,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
parentType: "svc",
hostname: "default-test",
clusterTargetIP: "10.20.30.40",
confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e",
confFileHash: "362360188dac62bca8013c8134929fed8efd84b1f410c00873d14a05709b5647",
app: kubetypes.AppIngressProxy,
}
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
@@ -1399,7 +1399,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
mak.Set(&svc.Annotations, AnnotationHostname, "another-test")
})
o.hostname = "another-test"
o.confFileHash = "5d754cf55463135ee34aa9821f2fd8483b53eb0570c3740c84a086304f427684"
o.confFileHash = "20db57cfabc3fc6490f6bb1dc85994e61d255cdfa2a56abb0141736e59f263ef"
expectReconciled(t, sr, "default", "test")
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
}
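The expected confFileHash values change because the serialized tailscaled config now always carries an AppConnector section, and the hash is taken over the serialized config. The 64-hex-character values suggest a SHA-256 digest of the JSON; a hedged sketch of that idea (this is an assumption about the hashing, not the operator's actual helper):

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// hashConfig sketches how a config-file hash like the ones above could be
// derived: marshal the config and hex-encode a SHA-256 digest of the bytes.
// The operator's real helper may differ; this only illustrates why adding a
// field to the serialized config changes the expected hash.
func hashConfig(conf any) (string, error) {
	b, err := json.Marshal(conf)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(b)), nil
}

func main() {
	h, _ := hashConfig(map[string]any{"Hostname": "default-test"})
	fmt.Println(h) // 64 hex characters
}
```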

View File

@@ -132,10 +132,13 @@ type tailscaleSTSConfig struct {
}
type connector struct {
// routes is a list of subnet routes that this Connector should expose.
// routes is a list of routes that this Connector should advertise either as a subnet router or as an app
// connector.
routes string
// isExitNode defines whether this Connector should act as an exit node.
isExitNode bool
// isAppConnector defines whether this Connector should act as an app connector.
isAppConnector bool
}
type tsnetServer interface {
CertDomains() []string
@@ -674,7 +677,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
}
if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
enableMetrics(ss, pc)
enableMetrics(ss)
} else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
// TODO (irbekrm): fix this
// For Ingress proxies that have been configured with
@@ -763,7 +766,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
return ss
}
func enableMetrics(ss *appsv1.StatefulSet, pc *tsapi.ProxyClass) {
func enableMetrics(ss *appsv1.StatefulSet) {
for i, c := range ss.Spec.Template.Spec.Containers {
if c.Name == "tailscale" {
// Serve metrics on <pod-ip>:9001/debug/metrics. If
@@ -803,11 +806,13 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
Locked: "false",
Hostname: &stsC.Hostname,
NoStatefulFiltering: "false",
AppConnector: &ipn.AppConnectorPrefs{Advertise: false},
}
// For egress proxies only, we need to ensure that stateful filtering is
// not in place so that traffic from cluster can be forwarded via
// Tailscale IPs.
// TODO (irbekrm): set it to true always as this is now the default in core.
if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" {
conf.NoStatefulFiltering = "true"
}
@@ -817,6 +822,9 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
return nil, fmt.Errorf("error calculating routes: %w", err)
}
conf.AdvertiseRoutes = routes
if stsC.Connector.isAppConnector {
conf.AppConnector.Advertise = true
}
}
if shouldAcceptRoutes(stsC.ProxyClass) {
conf.AcceptRoutes = "true"
@@ -831,9 +839,15 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
}
conf.AuthKey = key
}
capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha)
capVerConfigs[107] = *conf
// AppConnector config option is only understood by clients of capver 107 and newer.
conf.AppConnector = nil
capVerConfigs[95] = *conf
// legacy config should not contain NoStatefulFiltering field.
// StatefulFiltering is only understood by clients of capver 95 and newer.
conf.NoStatefulFiltering.Clear()
capVerConfigs[94] = *conf
return capVerConfigs, nil
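The end of tailscaledConfig fans the single config out into per-capability-version variants: capver 107 keeps the new AppConnector section, capver 95 has it stripped because older clients do not understand it, and capver 94 additionally has NoStatefulFiltering cleared. A self-contained sketch of that progressive-stripping pattern, using simplified stand-in types rather than the real ipn.ConfigVAlpha:

```go
package main

import "fmt"

// appConnectorPrefs and config are simplified stand-ins for the real
// ipn.AppConnectorPrefs / ipn.ConfigVAlpha types used above.
type appConnectorPrefs struct{ Advertise bool }

type config struct {
	Hostname            string
	NoStatefulFiltering string             // "", "true" or "false" (stand-in for the real option type)
	AppConnector        *appConnectorPrefs // only understood by capver >= 107
}

func capVerConfigs(conf config) map[int]config {
	out := make(map[int]config)
	out[107] = conf // newest clients get the full config

	// Capver 95 clients do not understand the AppConnector field.
	conf.AppConnector = nil
	out[95] = conf

	// Capver 94 clients additionally do not understand stateful filtering.
	conf.NoStatefulFiltering = ""
	out[94] = conf
	return out
}

func main() {
	confs := capVerConfigs(config{
		Hostname:            "test-connector",
		NoStatefulFiltering: "false",
		AppConnector:        &appConnectorPrefs{Advertise: true},
	})
	for _, v := range []int{107, 95, 94} {
		c := confs[v]
		fmt.Printf("cap-%d: appConnector=%v noStatefulFiltering=%q\n", v, c.AppConnector != nil, c.NoStatefulFiltering)
	}
}
```

The test helper changes further down mirror this fan-out: the proxy Secret now carries "tailscaled", "cap-95.hujson" and "cap-107.hujson" entries.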

View File

@@ -48,6 +48,7 @@ type configOpts struct {
clusterTargetDNS string
subnetRoutes string
isExitNode bool
isAppConnector bool
confFileHash string
serveConfig *ipn.ServeConfig
shouldEnableForwardingClusterTrafficViaIngress bool
@@ -356,6 +357,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
Locked: "false",
AuthKey: ptr.To("secret-authkey"),
AcceptRoutes: "false",
AppConnector: &ipn.AppConnectorPrefs{Advertise: false},
}
if opts.proxyClass != "" {
t.Logf("applying configuration from ProxyClass %s", opts.proxyClass)
@@ -370,6 +372,9 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
if opts.shouldRemoveAuthKey {
conf.AuthKey = nil
}
if opts.isAppConnector {
conf.AppConnector = &ipn.AppConnectorPrefs{Advertise: true}
}
var routes []netip.Prefix
if opts.subnetRoutes != "" || opts.isExitNode {
r := opts.subnetRoutes
@@ -384,22 +389,29 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
routes = append(routes, prefix)
}
}
conf.AdvertiseRoutes = routes
b, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" {
conf.NoStatefulFiltering = "true"
} else {
conf.NoStatefulFiltering = "false"
}
conf.AdvertiseRoutes = routes
bnn, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
conf.AppConnector = nil
bn, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
conf.NoStatefulFiltering.Clear()
b, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
mak.Set(&s.StringData, "tailscaled", string(b))
mak.Set(&s.StringData, "cap-95.hujson", string(bn))
mak.Set(&s.StringData, "cap-107.hujson", string(bnn))
labels := map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
@@ -674,5 +686,17 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) {
}
mak.Set(&secret.StringData, "cap-95.hujson", string(b))
}
if len(secret.StringData["cap-107.hujson"]) != 0 {
conf := &ipn.ConfigVAlpha{}
if err := json.Unmarshal([]byte(secret.StringData["cap-107.hujson"]), conf); err != nil {
t.Fatalf("error umarshalling 'cap-107.hujson' contents: %v", err)
}
conf.AuthKey = nil
b, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling 'cap-107.huson' contents: %v", err)
}
mak.Set(&secret.StringData, "cap-107.hujson", string(b))
}
}
}