Move and export ExpectWorkers (#4207)

This is a useful test function and keeping it in the cluster package
means it can't be used by api/cli tests.
pull/4215/head
Jeff Mitchell 2 years ago committed by GitHub
parent 9b56db8858
commit fe4c43305c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -13,6 +13,7 @@ import (
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/tests/helper"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
)
@ -42,7 +43,7 @@ func TestIPv6Listener(t *testing.T) {
})
defer c1.Shutdown()
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
wconf, err := config.DevWorker()
require.NoError(err)
@ -56,7 +57,7 @@ func TestIPv6Listener(t *testing.T) {
defer w1.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Logger: c1.Config().Logger.ResetNamed("c2"),
@ -64,12 +65,12 @@ func TestIPv6Listener(t *testing.T) {
defer c2.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c2, w1)
helper.ExpectWorkers(t, c2, w1)
require.NoError(w1.Worker().Shutdown())
time.Sleep(10 * time.Second)
expectWorkers(t, c1)
expectWorkers(t, c2)
helper.ExpectWorkers(t, c1)
helper.ExpectWorkers(t, c2)
client, err := api.NewClient(nil)
require.NoError(err)

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/tests/helper"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/stretchr/testify/assert"
@ -37,8 +38,8 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
})
defer c2.Shutdown()
expectWorkers(t, c1)
expectWorkers(t, c2)
helper.ExpectWorkers(t, c1)
helper.ExpectWorkers(t, c2)
w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
WorkerAuthKms: c1.Config().WorkerAuthKms,
@ -48,8 +49,8 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
defer w1.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1)
expectWorkers(t, c2, w1)
helper.ExpectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c2, w1)
w2 := w1.AddClusterWorkerMember(t, &worker.TestWorkerOpts{
Logger: logger.Named("w2"),
@ -57,13 +58,13 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
defer w2.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1, w2)
expectWorkers(t, c2, w1, w2)
helper.ExpectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c2, w1, w2)
require.NoError(w1.Worker().Shutdown())
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w2)
expectWorkers(t, c2, w2)
helper.ExpectWorkers(t, c1, w2)
helper.ExpectWorkers(t, c2, w2)
w1 = worker.NewTestWorker(t, &worker.TestWorkerOpts{
WorkerAuthKms: c1.Config().WorkerAuthKms,
@ -73,12 +74,12 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
defer w1.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1, w2)
expectWorkers(t, c2, w1, w2)
helper.ExpectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c2, w1, w2)
require.NoError(c2.Controller().Shutdown())
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c1, w1, w2)
c2 = c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Logger: c1.Config().Logger.ResetNamed("c2"),
@ -86,8 +87,8 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
defer c2.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1, w2)
expectWorkers(t, c2, w1, w2)
helper.ExpectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c2, w1, w2)
}
func TestWorkerAppendInitialUpstreams(t *testing.T) {
@ -106,7 +107,7 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
})
defer c1.Shutdown()
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
initialUpstreams := append(c1.ClusterAddrs(), "127.0.0.9")
w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
@ -131,7 +132,7 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
break
}
}
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
// Upstreams should be equivalent to the controller cluster addr after status updates
assert.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)

@ -96,7 +96,7 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin
WorkerStatusGracePeriodDuration: controllerGracePeriod(burdenCase),
})
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
// Wire up the testing proxies
require.Len(c1.ClusterAddrs(), 1)
@ -120,7 +120,7 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin
require.NoError(err)
err = c1.WaitForNextWorkerStatusUpdate(w1.Name())
require.NoError(err)
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
// Use an independent context for test things that take a context so
// that we aren't tied to any timeouts in the controller, etc. This
@ -235,8 +235,8 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing
PublicClusterAddr: pl2.Addr().String(),
WorkerStatusGracePeriodDuration: controllerGracePeriod(burdenCase),
})
expectWorkers(t, c1)
expectWorkers(t, c2)
helper.ExpectWorkers(t, c1)
helper.ExpectWorkers(t, c2)
// *************
// ** Proxy 1 **
@ -284,8 +284,8 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing
require.NoError(err)
err = c2.WaitForNextWorkerStatusUpdate(w1.Name())
require.NoError(err)
expectWorkers(t, c1, w1)
expectWorkers(t, c2, w1)
helper.ExpectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c2, w1)
// Use an independent context for test things that take a context so
// that we aren't tied to any timeouts in the controller, etc. This

@ -15,6 +15,7 @@ import (
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/tests/helper"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
)
@ -54,7 +55,7 @@ func TestUnixListener(t *testing.T) {
})
defer c1.Shutdown()
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
wconf, err := config.DevWorker()
require.NoError(err)
@ -68,11 +69,11 @@ func TestUnixListener(t *testing.T) {
defer w1.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
require.NoError(w1.Worker().Shutdown())
time.Sleep(10 * time.Second)
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
require.NoError(c1.Controller().Shutdown())
c1 = controller.NewTestController(t, &controller.TestControllerOpts{
@ -83,7 +84,7 @@ func TestUnixListener(t *testing.T) {
defer c1.Shutdown()
time.Sleep(10 * time.Second)
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
client, err := api.NewClient(nil)
require.NoError(err)

@ -1,35 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cluster
import (
"testing"
"time"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// expectWorkers asserts that controller c has received a status update
// from every worker in workers within the last 30 seconds. Extra entries
// in the controller's update-time map are tolerated because the map is
// never pruned, so stale entries remain after a worker shuts down.
func expectWorkers(t *testing.T, c *controller.TestController, workers ...*worker.TestWorker) {
updateTimes := c.Controller().WorkerStatusUpdateTimes()
// Index the expected workers by name so update entries can be matched.
workerMap := map[string]*worker.TestWorker{}
for _, w := range workers {
workerMap[w.Name()] = w
}
updateTimes.Range(func(k, v any) bool {
require.NotNil(t, k)
require.NotNil(t, v)
if workerMap[k.(string)] == nil {
// We don't remove from updateTimes currently so if we're not
// expecting it we'll see an out-of-date entry
return true
}
// Every expected worker must have reported recently (within 30s).
assert.WithinDuration(t, time.Now(), v.(time.Time), 30*time.Second)
// Matched workers are removed; anything left over never reported.
delete(workerMap, k.(string))
return true
})
// All expected workers should have been seen and deleted above.
assert.Empty(t, workerMap)
}

@ -44,7 +44,7 @@ func TestWorkerBytesUpDown(t *testing.T) {
WorkerStatusGracePeriodDuration: helper.DefaultWorkerStatusGracePeriod,
})
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
// Wire up the testing proxies
require.Len(c1.ClusterAddrs(), 1)
@ -66,7 +66,7 @@ func TestWorkerBytesUpDown(t *testing.T) {
require.NoError(w1.Worker().WaitForNextSuccessfulStatusUpdate())
require.NoError(c1.WaitForNextWorkerStatusUpdate(w1.Name()))
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
// Use an independent context for test things that take a context so
// that we aren't tied to any timeouts in the controller, etc. This

@ -46,7 +46,7 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) {
})
defer c1.Shutdown()
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
// Wire up the testing proxies
require.Len(c1.ClusterAddrs(), 1)
@ -71,7 +71,7 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) {
require.NoError(err)
err = c1.WaitForNextWorkerStatusUpdate(w1.Name())
require.NoError(err)
expectWorkers(t, c1, w1)
helper.ExpectWorkers(t, c1, w1)
// Use an independent context for test things that take a context so
// that we aren't tied to any timeouts in the controller, etc. This

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/boundary/internal/daemon/controller"
ct "github.com/hashicorp/boundary/internal/daemon/controller/handlers/targets"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/tests/helper"
pb "github.com/hashicorp/boundary/sdk/pbs/controller/api/resources/targets"
"github.com/hashicorp/go-hclog"
"github.com/mr-tron/base58"
@ -37,7 +38,7 @@ func TestWorkerTagging(t *testing.T) {
ctx := c1.Context()
// No workers yet
expectWorkers(t, c1)
helper.ExpectWorkers(t, c1)
// Ensure target is valid
client := c1.Client()
@ -102,7 +103,7 @@ func TestWorkerTagging(t *testing.T) {
w3.Worker().WaitForNextSuccessfulStatusUpdate()
expectWorkers(t, c1, w1, w2, w3)
helper.ExpectWorkers(t, c1, w1, w2, w3)
// Ensure we are using the OSS filter, which uses egress only for worker selection
oldAuthFun := ct.AuthorizeSessionWorkerFilterFn

@ -20,6 +20,7 @@ import (
"github.com/hashicorp/boundary/api/targets"
"github.com/hashicorp/boundary/globals"
"github.com/hashicorp/boundary/internal/cmd/commands/connect"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/controller/common"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/session"
@ -502,3 +503,24 @@ func NewTestTcpServer(t *testing.T) *TestTcpServer {
go ts.run()
return ts
}
// ExpectWorkers asserts that the given controller has received a status
// update from every worker in workers within the last 30 seconds. Entries
// in the controller's worker-status update map that do not correspond to
// an expected worker are ignored, because the map is never pruned and may
// contain stale entries for workers that have since shut down.
func ExpectWorkers(t *testing.T, c *controller.TestController, workers ...*worker.TestWorker) {
	// Mark this function as a test helper so assertion failures are
	// attributed to the caller's line rather than this file.
	t.Helper()
	updateTimes := c.Controller().WorkerStatusUpdateTimes()
	// Index the expected workers by name for matching against the map.
	workerMap := map[string]*worker.TestWorker{}
	for _, w := range workers {
		workerMap[w.Name()] = w
	}
	updateTimes.Range(func(k, v any) bool {
		require.NotNil(t, k)
		require.NotNil(t, v)
		if workerMap[k.(string)] == nil {
			// We don't remove from updateTimes currently so if we're not
			// expecting it we'll see an out-of-date entry
			return true
		}
		// Every expected worker must have reported recently (within 30s).
		assert.WithinDuration(t, time.Now(), v.(time.Time), 30*time.Second)
		// Matched workers are removed; anything left afterward never reported.
		delete(workerMap, k.(string))
		return true
	})
	// All expected workers should have been seen and deleted above.
	assert.Empty(t, workerMap)
}

Loading…
Cancel
Save