diff --git a/control/controlclient/map.go b/control/controlclient/map.go
index 13b11d6df..1a54fc543 100644
--- a/control/controlclient/map.go
+++ b/control/controlclient/map.go
@@ -195,6 +195,10 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *t
 
 	ms.updateStateFromResponse(resp)
 
+	// Occasionally clean up old userprofile if it grows too much
+	// from e.g. ephemeral tagged nodes.
+	ms.cleanLastUserProfile()
+
 	if ms.tryHandleIncrementally(resp) {
 		ms.occasionallyPrintSummary(ms.lastNetmapSummary)
 		return nil
@@ -292,7 +296,6 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
 	for _, up := range resp.UserProfiles {
 		ms.lastUserProfile[up.ID] = up
 	}
-	// TODO(bradfitz): clean up old user profiles? maybe not worth it.
 
 	if dm := resp.DERPMap; dm != nil {
 		ms.vlogf("netmap: new map contains DERP map")
@@ -532,6 +535,32 @@ func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserI
 	}
 }
 
+// cleanLastUserProfile deletes any entries from lastUserProfile
+// that are not referenced by any peer or the self node.
+//
+// This is expensive enough that we don't do this on every message
+// from the server, but only when it's grown enough to matter.
+func (ms *mapSession) cleanLastUserProfile() {
+	if len(ms.lastUserProfile) < len(ms.peers)*2 {
+		// Hasn't grown enough to be worth cleaning.
+		return
+	}
+
+	keep := set.Set[tailcfg.UserID]{}
+	if node := ms.lastNode; node.Valid() {
+		keep.Add(node.User())
+	}
+	for _, n := range ms.peers {
+		keep.Add(n.User())
+		keep.Add(n.Sharer())
+	}
+	for userID := range ms.lastUserProfile {
+		if !keep.Contains(userID) {
+			delete(ms.lastUserProfile, userID)
+		}
+	}
+}
+
 var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
 
 // patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch