chore: add support for blockEndpoints to configMaps (#11512)

Adds support for setting blockEndpoints on configMaps.
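
For context, a minimal usage sketch (not part of this diff) of how a caller might drive the new setter. The constructor arguments mirror the tests below; the caller scenario is hypothetical:

	// Sketch only: toggling endpoint blocking at runtime.
	cm := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
	defer cm.close()

	cm.setBlockEndpoints(true)  // value changed: netmap marked dirty, peers reconfigured with Endpoints = nil (DERP-only)
	cm.setBlockEndpoints(true)  // same value: nothing marked dirty, the engine is not reconfigured
	cm.setBlockEndpoints(false) // value changed again: peers regain their stored direct endpoints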
Spike Curtis 2024-01-11 09:18:31 +04:00 committed by GitHub
parent 617ecbfb1f
commit 7005fb1b2f
2 changed files with 105 additions and 4 deletions

tailnet/configmaps.go

@@ -207,7 +207,11 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap {
 func (c *configMaps) peerConfigLocked() []*tailcfg.Node {
 	out := make([]*tailcfg.Node, 0, len(c.peers))
 	for _, p := range c.peers {
-		out = append(out, p.node.Clone())
+		n := p.node.Clone()
+		if c.blockEndpoints {
+			n.Endpoints = nil
+		}
+		out = append(out, n)
 	}
 	return out
 }
@@ -228,6 +232,19 @@ func (c *configMaps) setAddresses(ips []netip.Prefix) {
 	c.Broadcast()
 }
 
+// setBlockEndpoints sets whether we should block configuring endpoints we learn
+// from peers. It triggers a reconfiguration of the engine if the value changes.
+// nolint: revive
+func (c *configMaps) setBlockEndpoints(blockEndpoints bool) {
+	c.L.Lock()
+	defer c.L.Unlock()
+	if c.blockEndpoints != blockEndpoints {
+		c.netmapDirty = true
+	}
+	c.blockEndpoints = blockEndpoints
+	c.Broadcast()
+}
+
 // derpMapLocked returns the current DERPMap. c.L must be held.
 func (c *configMaps) derpMapLocked() *tailcfg.DERPMap {
 	m := DERPMapFromProto(c.derpMap)
@@ -342,9 +359,6 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat
 		// to avoid random hangs while we set up the connection again after
 		// inactivity.
 		node.KeepAlive = ok && peerStatus.Active
-		if c.blockEndpoints {
-			node.Endpoints = nil
-		}
 	}
 	switch {
 	case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE:

tailnet/configmaps_test.go

@@ -484,6 +484,93 @@ func TestConfigMaps_updatePeers_lost_and_found(t *testing.T) {
 	_ = testutil.RequireRecvCtx(ctx, t, done)
 }
 
+func TestConfigMaps_setBlockEndpoints_different(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.L.Unlock()
+
+	uut.setBlockEndpoints(true)
+
+	nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap)
+	r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig)
+	require.Len(t, nm.Peers, 1)
+	require.Len(t, nm.Peers[0].Endpoints, 0)
+	require.Len(t, r.wg.Peers, 1)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
+func TestConfigMaps_setBlockEndpoints_same(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists && blockEndpoints set to true
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.blockEndpoints = true
+	uut.L.Unlock()
+
+	// Then: we don't configure
+	requireNeverConfigures(ctx, t, uut)
+
+	// When: we set blockEndpoints to true (the same value)
+	uut.setBlockEndpoints(true)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
 func expectStatusWithHandshake(
 	ctx context.Context, t testing.TB, fEng *fakeEngineConfigurable, k key.NodePublic, lastHandshake time.Time,
 ) <-chan struct{} {