2022-09-01 01:09:44 +00:00
|
|
|
package tailnet_test
|
|
|
|
|
|
|
|
import (
|
2023-05-05 16:29:03 +00:00
|
|
|
"context"
|
2023-05-02 16:58:21 +00:00
|
|
|
"encoding/json"
|
2022-09-01 01:09:44 +00:00
|
|
|
"net"
|
2023-05-05 16:29:03 +00:00
|
|
|
"net/http"
|
|
|
|
"net/http/httptest"
|
2024-03-01 15:02:33 +00:00
|
|
|
"net/netip"
|
2024-01-22 11:26:20 +00:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2022-09-01 01:09:44 +00:00
|
|
|
"testing"
|
2023-05-02 16:58:21 +00:00
|
|
|
"time"
|
2022-09-01 01:09:44 +00:00
|
|
|
|
|
|
|
"github.com/google/uuid"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2024-01-22 11:26:20 +00:00
|
|
|
"go.uber.org/mock/gomock"
|
|
|
|
"nhooyr.io/websocket"
|
|
|
|
"tailscale.com/tailcfg"
|
|
|
|
"tailscale.com/types/key"
|
2022-09-01 01:09:44 +00:00
|
|
|
|
2024-01-22 11:26:20 +00:00
|
|
|
"cdr.dev/slog"
|
|
|
|
"cdr.dev/slog/sloggers/slogtest"
|
2024-03-26 17:44:31 +00:00
|
|
|
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
2023-08-18 18:55:43 +00:00
|
|
|
"github.com/coder/coder/v2/tailnet"
|
2024-01-22 11:26:20 +00:00
|
|
|
"github.com/coder/coder/v2/tailnet/proto"
|
|
|
|
"github.com/coder/coder/v2/tailnet/tailnettest"
|
2023-12-06 11:04:28 +00:00
|
|
|
"github.com/coder/coder/v2/tailnet/test"
|
2023-08-18 18:55:43 +00:00
|
|
|
"github.com/coder/coder/v2/testutil"
|
2022-09-01 01:09:44 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// TestCoordinator exercises the legacy (v1 wire protocol) Coordinator over raw
// net.Pipe and real websocket connections: client/agent lifecycles, node
// address validation, agent reconnect and double-connect, and the
// ready-for-handshake acks.
func TestCoordinator(t *testing.T) {
	t.Parallel()

	// A client may connect before its agent exists; the coordinator should
	// still record the client's node and tear the connection down cleanly.
	t.Run("ClientWithoutAgent", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		// ServeCoordinator gives us a function to push our node and a channel
		// that yields the serve loop's terminal error.
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeClient(server, id, uuid.New())
			assert.NoError(t, err)
			close(closeChan)
		}()
		// A /128 prefix is the only valid address width for a node.
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(tailnet.IP(), 128),
			},
			PreferredDERP: 10,
		})
		require.Eventually(t, func() bool {
			return coordinator.Node(id) != nil
		}, testutil.WaitShort, testutil.IntervalFast)
		require.NoError(t, client.Close())
		require.NoError(t, server.Close())
		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// A client node advertising a non-/128 prefix must be rejected, which
	// terminates the serve loop (errors are expected, hence IgnoreErrors).
	t.Run("ClientWithoutAgent_InvalidIPBits", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeClient(server, id, uuid.New())
			assert.NoError(t, err)
			close(closeChan)
		}()
		// Invalid: /64 instead of the required /128.
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(tailnet.IP(), 64),
			},
			PreferredDERP: 10,
		})

		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// An agent with no clients publishes its node (derived from its own ID)
	// and the coordinator stores it.
	t.Run("AgentWithoutClients", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(server, id, "")
			assert.NoError(t, err)
			close(closeChan)
		}()
		// Agents must advertise the IP derived from their own agent ID.
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(tailnet.IPFromUUID(id), 128),
			},
			PreferredDERP: 10,
		})
		require.Eventually(t, func() bool {
			return coordinator.Node(id) != nil
		}, testutil.WaitShort, testutil.IntervalFast)
		err := client.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// The fixed legacy agent IP (workspacesdk.AgentIP) is also accepted for
	// backwards compatibility with older agents.
	t.Run("AgentWithoutClients_ValidIPLegacy", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(server, id, "")
			assert.NoError(t, err)
			close(closeChan)
		}()
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(workspacesdk.AgentIP, 128),
			},
			PreferredDERP: 10,
		})
		require.Eventually(t, func() bool {
			return coordinator.Node(id) != nil
		}, testutil.WaitShort, testutil.IntervalFast)
		err := client.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// An agent advertising an IP that matches neither its UUID-derived IP nor
	// the legacy IP must be rejected and disconnected.
	t.Run("AgentWithoutClients_InvalidIP", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(server, id, "")
			assert.NoError(t, err)
			close(closeChan)
		}()
		// tailnet.IP() is a random IP, not the one derived from this agent's
		// UUID, so validation must fail.
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(tailnet.IP(), 128),
			},
			PreferredDERP: 10,
		})
		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// An agent with the right IP but a non-/128 prefix width must also be
	// rejected and disconnected.
	t.Run("AgentWithoutClients_InvalidBits", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
		ctx := testutil.Context(t, testutil.WaitMedium)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()
		client, server := net.Pipe()
		sendNode, errChan := tailnet.ServeCoordinator(client, func(node []*tailnet.Node) error {
			return nil
		})
		id := uuid.New()
		closeChan := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(server, id, "")
			assert.NoError(t, err)
			close(closeChan)
		}()
		sendNode(&tailnet.Node{
			Addresses: []netip.Prefix{
				netip.PrefixFrom(tailnet.IPFromUUID(id), 64),
			},
			PreferredDERP: 10,
		})
		_ = testutil.RequireRecvCtx(ctx, t, errChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeChan)
	})

	// Full happy path: agent and client exchange node updates in both
	// directions, survive the coordinator's write timeout, and the agent can
	// reconnect while the client stays attached.
	t.Run("AgentWithClient", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		coordinator := tailnet.NewCoordinator(logger)
		defer func() {
			err := coordinator.Close()
			require.NoError(t, err)
		}()

		// in this test we use real websockets to test use of deadlines
		ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
		defer cancel()
		agentWS, agentServerWS := websocketConn(ctx, t)
		defer agentWS.Close()
		agentNodeChan := make(chan []*tailnet.Node)
		sendAgentNode, agentErrChan := tailnet.ServeCoordinator(agentWS, func(nodes []*tailnet.Node) error {
			agentNodeChan <- nodes
			return nil
		})
		agentID := uuid.New()
		closeAgentChan := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(agentServerWS, agentID, "")
			assert.NoError(t, err)
			close(closeAgentChan)
		}()
		sendAgentNode(&tailnet.Node{PreferredDERP: 1})
		require.Eventually(t, func() bool {
			return coordinator.Node(agentID) != nil
		}, testutil.WaitShort, testutil.IntervalFast)

		clientWS, clientServerWS := websocketConn(ctx, t)
		defer clientWS.Close()
		defer clientServerWS.Close()
		clientNodeChan := make(chan []*tailnet.Node)
		sendClientNode, clientErrChan := tailnet.ServeCoordinator(clientWS, func(nodes []*tailnet.Node) error {
			clientNodeChan <- nodes
			return nil
		})
		clientID := uuid.New()
		closeClientChan := make(chan struct{})
		go func() {
			err := coordinator.ServeClient(clientServerWS, clientID, agentID)
			assert.NoError(t, err)
			close(closeClientChan)
		}()
		// The client should immediately receive the agent node buffered above.
		agentNodes := testutil.RequireRecvCtx(ctx, t, clientNodeChan)
		require.Len(t, agentNodes, 1)

		// And the client's node should be forwarded to the agent.
		sendClientNode(&tailnet.Node{PreferredDERP: 2})
		clientNodes := testutil.RequireRecvCtx(ctx, t, agentNodeChan)
		require.Len(t, clientNodes, 1)

		// wait longer than the internal wait timeout.
		// this tests for regression of https://github.com/coder/coder/issues/7428
		time.Sleep(tailnet.WriteTimeout * 3 / 2)

		// Ensure an update to the agent node reaches the client!
		sendAgentNode(&tailnet.Node{PreferredDERP: 3})
		agentNodes = testutil.RequireRecvCtx(ctx, t, clientNodeChan)
		require.Len(t, agentNodes, 1)

		// Close the agent WebSocket so a new one can connect.
		err := agentWS.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, agentErrChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeAgentChan)

		// Create a new agent connection. This is to simulate a reconnect!
		agentWS, agentServerWS = net.Pipe()
		defer agentWS.Close()
		agentNodeChan = make(chan []*tailnet.Node)
		_, agentErrChan = tailnet.ServeCoordinator(agentWS, func(nodes []*tailnet.Node) error {
			agentNodeChan <- nodes
			return nil
		})
		closeAgentChan = make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(agentServerWS, agentID, "")
			assert.NoError(t, err)
			close(closeAgentChan)
		}()
		// Ensure the existing listening client sends its node immediately!
		clientNodes = testutil.RequireRecvCtx(ctx, t, agentNodeChan)
		require.Len(t, clientNodes, 1)

		err = agentWS.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, agentErrChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeAgentChan)

		err = clientWS.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, clientErrChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeClientChan)
	})

	// A second agent connection with the same agent ID must forcibly replace
	// the first one while a connected client keeps working.
	t.Run("AgentDoubleConnect", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		coordinator := tailnet.NewCoordinator(logger)
		ctx := testutil.Context(t, testutil.WaitLong)

		agentWS1, agentServerWS1 := net.Pipe()
		defer agentWS1.Close()
		agentNodeChan1 := make(chan []*tailnet.Node)
		sendAgentNode1, agentErrChan1 := tailnet.ServeCoordinator(agentWS1, func(nodes []*tailnet.Node) error {
			t.Logf("agent1 got node update: %v", nodes)
			agentNodeChan1 <- nodes
			return nil
		})
		agentID := uuid.New()
		closeAgentChan1 := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(agentServerWS1, agentID, "")
			assert.NoError(t, err)
			close(closeAgentChan1)
		}()
		sendAgentNode1(&tailnet.Node{PreferredDERP: 1})
		require.Eventually(t, func() bool {
			return coordinator.Node(agentID) != nil
		}, testutil.WaitShort, testutil.IntervalFast)

		clientWS, clientServerWS := net.Pipe()
		defer clientWS.Close()
		defer clientServerWS.Close()
		clientNodeChan := make(chan []*tailnet.Node)
		sendClientNode, clientErrChan := tailnet.ServeCoordinator(clientWS, func(nodes []*tailnet.Node) error {
			t.Logf("client got node update: %v", nodes)
			clientNodeChan <- nodes
			return nil
		})
		clientID := uuid.New()
		closeClientChan := make(chan struct{})
		go func() {
			err := coordinator.ServeClient(clientServerWS, clientID, agentID)
			assert.NoError(t, err)
			close(closeClientChan)
		}()
		agentNodes := testutil.RequireRecvCtx(ctx, t, clientNodeChan)
		require.Len(t, agentNodes, 1)
		sendClientNode(&tailnet.Node{PreferredDERP: 2})
		clientNodes := testutil.RequireRecvCtx(ctx, t, agentNodeChan1)
		require.Len(t, clientNodes, 1)

		// Ensure an update to the agent node reaches the client!
		sendAgentNode1(&tailnet.Node{PreferredDERP: 3})
		agentNodes = testutil.RequireRecvCtx(ctx, t, clientNodeChan)
		require.Len(t, agentNodes, 1)

		// Create a new agent connection without disconnecting the old one.
		agentWS2, agentServerWS2 := net.Pipe()
		defer agentWS2.Close()
		agentNodeChan2 := make(chan []*tailnet.Node)
		_, agentErrChan2 := tailnet.ServeCoordinator(agentWS2, func(nodes []*tailnet.Node) error {
			t.Logf("agent2 got node update: %v", nodes)
			agentNodeChan2 <- nodes
			return nil
		})
		closeAgentChan2 := make(chan struct{})
		go func() {
			err := coordinator.ServeAgent(agentServerWS2, agentID, "")
			assert.NoError(t, err)
			close(closeAgentChan2)
		}()

		// Ensure the existing listening client sends its node immediately!
		clientNodes = testutil.RequireRecvCtx(ctx, t, agentNodeChan2)
		require.Len(t, clientNodes, 1)

		// This original agent websocket should've been closed forcefully.
		_ = testutil.RequireRecvCtx(ctx, t, agentErrChan1)
		_ = testutil.RequireRecvCtx(ctx, t, closeAgentChan1)

		err := agentWS2.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, agentErrChan2)
		_ = testutil.RequireRecvCtx(ctx, t, closeAgentChan2)

		err = clientWS.Close()
		require.NoError(t, err)
		_ = testutil.RequireRecvCtx(ctx, t, clientErrChan)
		_ = testutil.RequireRecvCtx(ctx, t, closeClientChan)
	})

	// Ready-for-handshake acks, happy path (shared helper).
	t.Run("AgentAck", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		coordinator := tailnet.NewCoordinator(logger)
		ctx := testutil.Context(t, testutil.WaitShort)

		test.ReadyForHandshakeTest(ctx, t, coordinator)
	})

	// Ready-for-handshake acks from a peer without permission (shared helper).
	t.Run("AgentAck_NoPermission", func(t *testing.T) {
		t.Parallel()
		logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
		coordinator := tailnet.NewCoordinator(logger)
		ctx := testutil.Context(t, testutil.WaitShort)

		test.ReadyForHandshakeNoPermissionTest(ctx, t, coordinator)
	})
}
|
2023-05-02 16:58:21 +00:00
|
|
|
|
|
|
|
// TestCoordinator_AgentUpdateWhileClientConnects tests for regression on
// https://github.com/coder/coder/issues/7295
//
// It drives the legacy JSON wire protocol by hand over net.Pipe: by reading
// only a single byte of the first client update, it keeps the coordinator's
// write to the client in flight while a second agent update arrives, then
// verifies both updates are ultimately delivered in order.
func TestCoordinator_AgentUpdateWhileClientConnects(t *testing.T) {
	t.Parallel()
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	coordinator := tailnet.NewCoordinator(logger)
	agentWS, agentServerWS := net.Pipe()
	defer agentWS.Close()

	agentID := uuid.New()
	go func() {
		err := coordinator.ServeAgent(agentServerWS, agentID, "")
		assert.NoError(t, err)
	}()

	// send an agent update before the client connects so that there is
	// node data available to send right away.
	aNode := tailnet.Node{PreferredDERP: 0}
	aData, err := json.Marshal(&aNode)
	require.NoError(t, err)
	err = agentWS.SetWriteDeadline(time.Now().Add(testutil.WaitShort))
	require.NoError(t, err)
	_, err = agentWS.Write(aData)
	require.NoError(t, err)

	require.Eventually(t, func() bool {
		return coordinator.Node(agentID) != nil
	}, testutil.WaitShort, testutil.IntervalFast)

	// Connect from the client
	clientWS, clientServerWS := net.Pipe()
	defer clientWS.Close()
	clientID := uuid.New()
	go func() {
		err := coordinator.ServeClient(clientServerWS, clientID, agentID)
		assert.NoError(t, err)
	}()

	// peek one byte from the node update, so we know the coordinator is
	// trying to write to the client.
	// buffer needs to be 2 characters longer because return value is a list
	// so, it needs [ and ]
	buf := make([]byte, len(aData)+2)
	err = clientWS.SetReadDeadline(time.Now().Add(testutil.WaitShort))
	require.NoError(t, err)
	n, err := clientWS.Read(buf[:1])
	require.NoError(t, err)
	require.Equal(t, 1, n)

	// send a second update
	aNode.PreferredDERP = 1
	// NOTE(review): this require re-checks the previous err; harmless but
	// redundant.
	require.NoError(t, err)
	aData, err = json.Marshal(&aNode)
	require.NoError(t, err)
	err = agentWS.SetWriteDeadline(time.Now().Add(testutil.WaitShort))
	require.NoError(t, err)
	_, err = agentWS.Write(aData)
	require.NoError(t, err)

	// read the rest of the update from the client, should be initial node.
	err = clientWS.SetReadDeadline(time.Now().Add(testutil.WaitShort))
	require.NoError(t, err)
	n, err = clientWS.Read(buf[1:])
	require.NoError(t, err)
	var cNodes []*tailnet.Node
	// buf[0] holds the byte peeked above, so the full message is n+1 bytes.
	err = json.Unmarshal(buf[:n+1], &cNodes)
	require.NoError(t, err)
	require.Len(t, cNodes, 1)
	require.Equal(t, 0, cNodes[0].PreferredDERP)

	// read second update
	// without a fix for https://github.com/coder/coder/issues/7295 our
	// read would time out here.
	err = clientWS.SetReadDeadline(time.Now().Add(testutil.WaitShort))
	require.NoError(t, err)
	n, err = clientWS.Read(buf)
	require.NoError(t, err)
	err = json.Unmarshal(buf[:n], &cNodes)
	require.NoError(t, err)
	require.Len(t, cNodes, 1)
	require.Equal(t, 1, cNodes[0].PreferredDERP)
}
|
2023-05-05 16:29:03 +00:00
|
|
|
|
2023-12-06 11:04:28 +00:00
|
|
|
func TestCoordinator_BidirectionalTunnels(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
2023-12-15 07:38:12 +00:00
|
|
|
coordinator := tailnet.NewCoordinator(logger)
|
2023-12-06 11:04:28 +00:00
|
|
|
ctx := testutil.Context(t, testutil.WaitShort)
|
|
|
|
test.BidirectionalTunnels(ctx, t, coordinator)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestCoordinator_GracefulDisconnect(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
2023-12-15 07:38:12 +00:00
|
|
|
coordinator := tailnet.NewCoordinator(logger)
|
2023-12-06 11:04:28 +00:00
|
|
|
ctx := testutil.Context(t, testutil.WaitShort)
|
|
|
|
test.GracefulDisconnectTest(ctx, t, coordinator)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestCoordinator_Lost(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
2023-12-15 07:38:12 +00:00
|
|
|
coordinator := tailnet.NewCoordinator(logger)
|
2023-12-06 11:04:28 +00:00
|
|
|
ctx := testutil.Context(t, testutil.WaitShort)
|
|
|
|
test.LostTest(ctx, t, coordinator)
|
|
|
|
}
|
|
|
|
|
2024-01-30 20:38:19 +00:00
|
|
|
func TestCoordinator_MultiAgent_CoordClose(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
|
|
|
defer cancel()
|
|
|
|
coord1 := tailnet.NewCoordinator(logger.Named("coord1"))
|
|
|
|
defer coord1.Close()
|
|
|
|
|
|
|
|
ma1 := tailnettest.NewTestMultiAgent(t, coord1)
|
|
|
|
defer ma1.Close()
|
|
|
|
|
|
|
|
err := coord1.Close()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
ma1.RequireEventuallyClosed(ctx)
|
|
|
|
}
|
|
|
|
|
2023-05-05 16:29:03 +00:00
|
|
|
func websocketConn(ctx context.Context, t *testing.T) (client net.Conn, server net.Conn) {
|
|
|
|
t.Helper()
|
|
|
|
sc := make(chan net.Conn, 1)
|
|
|
|
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
|
|
|
wss, err := websocket.Accept(rw, r, nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
conn := websocket.NetConn(r.Context(), wss, websocket.MessageBinary)
|
|
|
|
sc <- conn
|
|
|
|
close(sc) // there can be only one
|
|
|
|
|
|
|
|
// hold open until context canceled
|
|
|
|
<-ctx.Done()
|
|
|
|
}))
|
|
|
|
t.Cleanup(s.Close)
|
|
|
|
// nolint: bodyclose
|
|
|
|
wsc, _, err := websocket.Dial(ctx, s.URL, nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
client = websocket.NetConn(ctx, wsc, websocket.MessageBinary)
|
|
|
|
server, ok := <-sc
|
|
|
|
require.True(t, ok)
|
|
|
|
return client, server
|
|
|
|
}
|
2024-01-22 11:26:20 +00:00
|
|
|
|
|
|
|
func TestInMemoryCoordination(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
ctx := testutil.Context(t, testutil.WaitShort)
|
|
|
|
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
|
|
|
clientID := uuid.UUID{1}
|
|
|
|
agentID := uuid.UUID{2}
|
|
|
|
mCoord := tailnettest.NewMockCoordinator(gomock.NewController(t))
|
|
|
|
fConn := &fakeCoordinatee{}
|
|
|
|
|
|
|
|
reqs := make(chan *proto.CoordinateRequest, 100)
|
|
|
|
resps := make(chan *proto.CoordinateResponse, 100)
|
2024-03-01 15:02:33 +00:00
|
|
|
mCoord.EXPECT().Coordinate(gomock.Any(), clientID, gomock.Any(), tailnet.ClientCoordinateeAuth{agentID}).
|
2024-01-22 11:26:20 +00:00
|
|
|
Times(1).Return(reqs, resps)
|
|
|
|
|
|
|
|
uut := tailnet.NewInMemoryCoordination(ctx, logger, clientID, agentID, mCoord, fConn)
|
|
|
|
defer uut.Close()
|
|
|
|
|
|
|
|
coordinationTest(ctx, t, uut, fConn, reqs, resps, agentID)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err := <-uut.Error():
|
|
|
|
require.NoError(t, err)
|
|
|
|
default:
|
|
|
|
// OK!
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestRemoteCoordination runs the shared coordinationTest against a
// Coordination that talks to a mock Coordinator over a real dRPC
// client/server pair connected by net.Pipe.
func TestRemoteCoordination(t *testing.T) {
	t.Parallel()
	ctx := testutil.Context(t, testutil.WaitShort)
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	clientID := uuid.UUID{1}
	agentID := uuid.UUID{2}
	mCoord := tailnettest.NewMockCoordinator(gomock.NewController(t))
	fConn := &fakeCoordinatee{}

	// Buffered channels stand in for the coordinator's request/response
	// streams; the mock hands them out exactly once.
	reqs := make(chan *proto.CoordinateRequest, 100)
	resps := make(chan *proto.CoordinateResponse, 100)
	mCoord.EXPECT().Coordinate(gomock.Any(), clientID, gomock.Any(), tailnet.ClientCoordinateeAuth{agentID}).
		Times(1).Return(reqs, resps)

	// The client service dereferences an atomic pointer to find the current
	// coordinator. The DERP map fetcher must never be called in this test.
	var coord tailnet.Coordinator = mCoord
	coordPtr := atomic.Pointer[tailnet.Coordinator]{}
	coordPtr.Store(&coord)
	svc, err := tailnet.NewClientService(
		logger.Named("svc"), &coordPtr,
		time.Hour,
		func() *tailcfg.DERPMap { panic("not implemented") },
	)
	require.NoError(t, err)
	sC, cC := net.Pipe()

	serveErr := make(chan error, 1)
	go func() {
		err := svc.ServeClient(ctx, proto.CurrentVersion.String(), sC, clientID, agentID)
		serveErr <- err
	}()

	client, err := tailnet.NewDRPCClient(cC, logger)
	require.NoError(t, err)
	protocol, err := client.Coordinate(ctx)
	require.NoError(t, err)

	uut := tailnet.NewRemoteCoordination(logger.Named("coordination"), protocol, fConn, agentID)
	defer uut.Close()

	coordinationTest(ctx, t, uut, fConn, reqs, resps, agentID)

	// After a graceful close over dRPC the stream-termination error is
	// acceptable; anything else must not be pending.
	select {
	case err := <-uut.Error():
		require.ErrorContains(t, err, "stream terminated by sending close")
	default:
		// OK!
	}
}
|
|
|
|
|
2024-04-10 22:15:33 +00:00
|
|
|
// TestRemoteCoordination_SendsReadyForHandshake verifies that when a remote
// coordination (configured without a tunnel destination, i.e. agent-like)
// receives a NODE update for a peer, it responds with a ReadyForHandshake
// request for that peer's ID.
func TestRemoteCoordination_SendsReadyForHandshake(t *testing.T) {
	t.Parallel()
	ctx := testutil.Context(t, testutil.WaitShort)
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	clientID := uuid.UUID{1}
	agentID := uuid.UUID{2}
	mCoord := tailnettest.NewMockCoordinator(gomock.NewController(t))
	fConn := &fakeCoordinatee{}

	// Buffered channels stand in for the coordinator's request/response
	// streams; the mock hands them out exactly once.
	reqs := make(chan *proto.CoordinateRequest, 100)
	resps := make(chan *proto.CoordinateResponse, 100)
	mCoord.EXPECT().Coordinate(gomock.Any(), clientID, gomock.Any(), tailnet.ClientCoordinateeAuth{agentID}).
		Times(1).Return(reqs, resps)

	// The client service dereferences an atomic pointer to find the current
	// coordinator. The DERP map fetcher must never be called in this test.
	var coord tailnet.Coordinator = mCoord
	coordPtr := atomic.Pointer[tailnet.Coordinator]{}
	coordPtr.Store(&coord)
	svc, err := tailnet.NewClientService(
		logger.Named("svc"), &coordPtr,
		time.Hour,
		func() *tailcfg.DERPMap { panic("not implemented") },
	)
	require.NoError(t, err)
	sC, cC := net.Pipe()

	serveErr := make(chan error, 1)
	go func() {
		err := svc.ServeClient(ctx, proto.CurrentVersion.String(), sC, clientID, agentID)
		serveErr <- err
	}()

	client, err := tailnet.NewDRPCClient(cC, logger)
	require.NoError(t, err)
	protocol, err := client.Coordinate(ctx)
	require.NoError(t, err)

	// Zero UUID tunnel destination: no AddTunnel is issued, so the first
	// request we observe below is the ReadyForHandshake.
	uut := tailnet.NewRemoteCoordination(logger.Named("coordination"), protocol, fConn, uuid.UUID{})
	defer uut.Close()

	// Build a valid peer NODE update with freshly generated keys.
	nk, err := key.NewNode().Public().MarshalBinary()
	require.NoError(t, err)
	dk, err := key.NewDisco().Public().MarshalText()
	require.NoError(t, err)
	testutil.RequireSendCtx(ctx, t, resps, &proto.CoordinateResponse{
		PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{{
			Id:   clientID[:],
			Kind: proto.CoordinateResponse_PeerUpdate_NODE,
			Node: &proto.Node{
				Id:    3,
				Key:   nk,
				Disco: string(dk),
			},
		}},
	})

	// The coordination must answer the NODE update with a ReadyForHandshake
	// naming the same peer.
	rfh := testutil.RequireRecvCtx(ctx, t, reqs)
	require.NotNil(t, rfh.ReadyForHandshake)
	require.Len(t, rfh.ReadyForHandshake, 1)
	require.Equal(t, clientID[:], rfh.ReadyForHandshake[0].Id)

	require.NoError(t, uut.Close())

	// After a graceful close over dRPC the stream-termination error is
	// acceptable; anything else must not be pending.
	select {
	case err := <-uut.Error():
		require.ErrorContains(t, err, "stream terminated by sending close")
	default:
		// OK!
	}
}
|
|
|
|
|
2024-01-22 11:26:20 +00:00
|
|
|
// coordinationTest tests that a coordination behaves correctly
//
// It is shared by the in-memory and remote coordination tests: given the
// coordination under test (uut), its fake coordinatee, and the raw
// request/response channels handed out by the (mock) coordinator, it walks
// the full client lifecycle: tunnel added, node update sent, peer update
// applied, then graceful disconnect on Close.
func coordinationTest(
	ctx context.Context, t *testing.T,
	uut tailnet.Coordination, fConn *fakeCoordinatee,
	reqs chan *proto.CoordinateRequest, resps chan *proto.CoordinateResponse,
	agentID uuid.UUID,
) {
	// It should add the tunnel, since we configured as a client
	req := testutil.RequireRecvCtx(ctx, t, reqs)
	require.Equal(t, agentID[:], req.GetAddTunnel().GetId())

	// when we call the callback, it should send a node update
	require.NotNil(t, fConn.callback)
	fConn.callback(&tailnet.Node{PreferredDERP: 1})

	req = testutil.RequireRecvCtx(ctx, t, reqs)
	require.Equal(t, int32(1), req.GetUpdateSelf().GetNode().GetPreferredDerp())

	// When we send a peer update, it should update the coordinatee
	nk, err := key.NewNode().Public().MarshalBinary()
	require.NoError(t, err)
	dk, err := key.NewDisco().Public().MarshalText()
	require.NoError(t, err)
	updates := []*proto.CoordinateResponse_PeerUpdate{
		{
			Id:   agentID[:],
			Kind: proto.CoordinateResponse_PeerUpdate_NODE,
			Node: &proto.Node{
				Id:    2,
				Key:   nk,
				Disco: string(dk),
			},
		},
	}
	testutil.RequireSendCtx(ctx, t, resps, &proto.CoordinateResponse{PeerUpdates: updates})
	// The update is applied asynchronously, so poll the fake coordinatee
	// under its lock until it arrives.
	require.Eventually(t, func() bool {
		fConn.Lock()
		defer fConn.Unlock()
		return len(fConn.updates) > 0
	}, testutil.WaitShort, testutil.IntervalFast)
	require.Len(t, fConn.updates[0], 1)
	require.Equal(t, agentID[:], fConn.updates[0][0].Id)

	err = uut.Close()
	require.NoError(t, err)
	// Drain the error channel so the checks below observe a settled state;
	// the callers assert on uut.Error() themselves afterwards.
	uut.Error()

	// When we close, it should gracefully disconnect
	req = testutil.RequireRecvCtx(ctx, t, reqs)
	require.NotNil(t, req.Disconnect)

	// It should set all peers lost on the coordinatee
	require.Equal(t, 1, fConn.setAllPeersLostCalls)
}
|
|
|
|
|
|
|
|
// fakeCoordinatee is a test double for the coordinatee side of a
// Coordination. It records every call it receives so tests can assert on
// them. The embedded Mutex guards all fields, since the coordination's
// goroutines and the test body access them concurrently.
type fakeCoordinatee struct {
	sync.Mutex
	// callback is the most recent function registered via SetNodeCallback.
	callback func(*tailnet.Node)
	// updates records each batch passed to UpdatePeers, in order.
	updates [][]*proto.CoordinateResponse_PeerUpdate
	// setAllPeersLostCalls counts invocations of SetAllPeersLost.
	setAllPeersLostCalls int
	// tunnelDestinations records IDs passed to SetTunnelDestination;
	// lazily initialized on first use.
	tunnelDestinations map[uuid.UUID]struct{}
}
|
|
|
|
|
|
|
|
func (f *fakeCoordinatee) UpdatePeers(updates []*proto.CoordinateResponse_PeerUpdate) error {
|
|
|
|
f.Lock()
|
|
|
|
defer f.Unlock()
|
|
|
|
f.updates = append(f.updates, updates)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *fakeCoordinatee) SetAllPeersLost() {
|
|
|
|
f.Lock()
|
|
|
|
defer f.Unlock()
|
|
|
|
f.setAllPeersLostCalls++
|
|
|
|
}
|
|
|
|
|
2024-04-10 22:15:33 +00:00
|
|
|
func (f *fakeCoordinatee) SetTunnelDestination(id uuid.UUID) {
|
|
|
|
f.Lock()
|
|
|
|
defer f.Unlock()
|
|
|
|
|
|
|
|
if f.tunnelDestinations == nil {
|
|
|
|
f.tunnelDestinations = map[uuid.UUID]struct{}{}
|
|
|
|
}
|
|
|
|
f.tunnelDestinations[id] = struct{}{}
|
|
|
|
}
|
|
|
|
|
2024-01-22 11:26:20 +00:00
|
|
|
func (f *fakeCoordinatee) SetNodeCallback(callback func(*tailnet.Node)) {
|
|
|
|
f.Lock()
|
|
|
|
defer f.Unlock()
|
|
|
|
f.callback = callback
|
|
|
|
}
|