make reauth test compat with tailscale head (#2167)
* make reauth test compat with tailscale head

tailscale/tailscale@1eaad7d broke our reauth test as it makes the client
retry with https/443 if it reconnects within 2 minutes.

This commit fixes this by running the test in two parts:
- with https, to confirm that instant reconnect works
- with http, and a 3 min wait, to check that it works without.

The change is not a general concern, as headscale in prod is run
with https.

Updates #2164

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* sort test for stable order

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

---------

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
kradalby authored Oct 3, 2024
1 parent e16ea2e commit 9515040
Showing 2 changed files with 106 additions and 80 deletions.
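
For orientation before reading the diffs: a condensed sketch of the reworked test's two-part structure, using the scenario helpers that appear in the full diff below. This is an excerpt, not the complete test; the "..." comments stand in for the elided steps.

// Condensed sketch only; the "..." comments stand in for the setup,
// logout, and re-login steps shown in the full diff below.
for _, https := range []bool{true, false} {
	t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
		// ... build the scenario (with TLS options when https is true),
		// bring all clients up, record their IPs, and log them out ...

		if !https {
			// Plain HTTP only: recent Tailscale clients keep retrying over
			// https/443 for ~2 minutes after a disconnect, so wait that out.
			time.Sleep(3 * time.Minute)
		}

		// ... create fresh pre-auth keys, run tailscale up again, wait for
		// sync, ping everyone, and assert each client kept its IPs ...
	})
}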
5 changes: 4 additions & 1 deletion hscontrol/db/preauth_keys_test.go
@@ -1,6 +1,7 @@
 package db
 
 import (
+	"sort"
 	"time"
 
 	"github.com/juanfont/headscale/hscontrol/types"
@@ -169,5 +170,7 @@ func (*Suite) TestPreAuthKeyACLTags(c *check.C) {
 
 	listedPaks, err := db.ListPreAuthKeys("test8")
 	c.Assert(err, check.IsNil)
-	c.Assert(listedPaks[0].Proto().GetAclTags(), check.DeepEquals, tags)
+	gotTags := listedPaks[0].Proto().GetAclTags()
+	sort.Sort(sort.StringSlice(gotTags))
+	c.Assert(gotTags, check.DeepEquals, tags)
 }
181 changes: 102 additions & 79 deletions integration/general_test.go
@@ -107,110 +107,133 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
for _, https := range []bool{true, false} {
t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)

spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
}
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
}

err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
assertNoErrHeadscaleEnv(t, err)
opts := []hsic.Option{hsic.WithTestName("pingallbyip")}
if https {
opts = []hsic.Option{
hsic.WithTestName("pingallbyip"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithHostnameAsServerURL(),
}
}

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, opts...)
assertNoErrHeadscaleEnv(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)

// assertClientsState(t, allClients)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)

clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
// assertClientsState(t, allClients)

for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}

err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}

t.Logf("all clients logged out")
err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
t.Logf("all clients logged out")

for userName := range spec {
key, err := scenario.CreatePreAuthKey(userName, true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)

err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}
// if the server is not running with HTTPS, we have to wait a bit before
// reconnection as the newest Tailscale client has a measure that will only
// reconnect over HTTPS if they saw a noise connection previously.
// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38
// https://github.com/juanfont/headscale/issues/2164
if !https {
time.Sleep(3 * time.Minute)
}

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
for userName := range spec {
key, err := scenario.CreatePreAuthKey(userName, true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}

// assertClientsState(t, allClients)
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}

allClients, err = scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
// assertClientsState(t, allClients)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
allClients, err = scenario.ListTailscaleClients()
assertNoErrListClients(t, err)

success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)

for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})

// lets check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Fatalf("IPs changed for client %s", client.Hostname())
}
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

for _, ip := range ips {
found := false
for _, oldIP := range clientIPs[client] {
if ip == oldIP {
found = true
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}

break
// lets check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Fatalf("IPs changed for client %s", client.Hostname())
}
}

if !found {
t.Fatalf(
"IPs changed for client %s. Used to be %v now %v",
client.Hostname(),
clientIPs[client],
ips,
)
for _, ip := range ips {
found := false
for _, oldIP := range clientIPs[client] {
if ip == oldIP {
found = true

break
}
}

if !found {
t.Fatalf(
"IPs changed for client %s. Used to be %v now %v",
client.Hostname(),
clientIPs[client],
ips,
)
}
}
}
}
})
}
}

