test(daemon): remove tests with multihop (#5397)

pull/5398/head
Damian Debkowski 1 year ago committed by GitHub
parent 2342e4b644
commit 868d7b8a2d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,56 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package controller_test
import (
"context"
"crypto/rand"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/cluster/handlers"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/db"
pbs "github.com/hashicorp/boundary/internal/gen/controller/servers/services"
"github.com/hashicorp/boundary/internal/kms"
"github.com/hashicorp/boundary/internal/types/scope"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/testing/protocmp"
)
func Test_Controller_RegisterUpstreamMessageServices(t *testing.T) {
assert, require := assert.New(t), require.New(t)
testCtx := context.Background()
conn, _ := db.TestSetup(t, "postgres")
wrapper := db.TestWrapper(t)
kmsCache := kms.TestKms(t, conn, wrapper)
require.NoError(kmsCache.CreateKeys(context.Background(), scope.Global.String(), kms.WithRandomReader(rand.Reader)))
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
})
conf, err := config.DevController()
require.NoError(err)
c := controller.NewTestController(t, &controller.TestControllerOpts{
Config: conf,
Logger: logger.Named("controller"),
})
t.Cleanup(c.Shutdown)
kmsWorker, pkiWorker, _, _ := worker.NewTestMultihopWorkers(t, logger, c.Context(), c.ClusterAddrs(),
c.Config().WorkerAuthKms, c.Controller().ServersRepoFn, nil, nil, nil, nil)
t.Cleanup(kmsWorker.Shutdown)
t.Cleanup(pkiWorker.Shutdown)
err = handlers.RegisterUpstreamMessageHandler(testCtx, pbs.MsgType_MSG_TYPE_ECHO, &handlers.TestMockUpstreamMessageHandler{})
require.NoError(err)
resp, err := pkiWorker.Worker().SendUpstreamMessage(testCtx, &pbs.EchoUpstreamMessageRequest{Msg: "ping"})
require.NoError(err)
assert.Empty(cmp.Diff(resp, &pbs.EchoUpstreamMessageResponse{Msg: "ping"}, protocmp.Transform()))
}

@ -1,56 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package worker_test
import (
"context"
"crypto/rand"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/cluster/handlers"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/db"
pbs "github.com/hashicorp/boundary/internal/gen/controller/servers/services"
"github.com/hashicorp/boundary/internal/kms"
"github.com/hashicorp/boundary/internal/types/scope"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/testing/protocmp"
)
func Test_Worker_RegisterUpstreamMessageServices(t *testing.T) {
assert, require := assert.New(t), require.New(t)
testCtx := context.Background()
conn, _ := db.TestSetup(t, "postgres")
wrapper := db.TestWrapper(t)
kmsCache := kms.TestKms(t, conn, wrapper)
require.NoError(kmsCache.CreateKeys(context.Background(), scope.Global.String(), kms.WithRandomReader(rand.Reader)))
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
})
conf, err := config.DevController()
require.NoError(err)
c := controller.NewTestController(t, &controller.TestControllerOpts{
Config: conf,
Logger: logger.Named("controller"),
})
t.Cleanup(c.Shutdown)
kmsWorker, pkiWorker, _, _ := worker.NewTestMultihopWorkers(t, logger, c.Context(), c.ClusterAddrs(),
c.Config().WorkerAuthKms, c.Controller().ServersRepoFn, nil, nil, nil, nil)
t.Cleanup(kmsWorker.Shutdown)
t.Cleanup(pkiWorker.Shutdown)
err = handlers.RegisterUpstreamMessageHandler(testCtx, pbs.MsgType_MSG_TYPE_ECHO, &handlers.TestMockUpstreamMessageHandler{})
require.NoError(err)
resp, err := pkiWorker.Worker().SendUpstreamMessage(testCtx, &pbs.EchoUpstreamMessageRequest{Msg: "ping"})
require.NoError(err)
assert.Empty(cmp.Diff(resp, &pbs.EchoUpstreamMessageResponse{Msg: "ping"}, protocmp.Transform()))
}

@ -16,7 +16,6 @@ import (
"github.com/hashicorp/boundary/internal/cmd/base"
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/controller/common"
"github.com/hashicorp/boundary/internal/db"
"github.com/hashicorp/boundary/internal/event"
pbs "github.com/hashicorp/boundary/internal/gen/controller/servers/services"
@ -423,146 +422,6 @@ func (tw *TestWorker) AddClusterWorkerMember(t testing.TB, opts *TestWorkerOpts)
return NewTestWorker(t, nextOpts)
}
// NewTestMultihopWorkers creates a PKI-KMS and PKI worker with the controller
// as an upstream, and two child workers (one PKI, one KMS) as downstreams of
// the initial workers (child PKI -> upstream PKI-KMS, child PKI-KMS -> upstream
// PKI). Tags for the PKI and child PKI/KMS workers can be passed in, if
// desired. All four workers have shutdown registered via t.Cleanup. The
// function sleeps at several points to give workers time to register and
// connect before returning.
func NewTestMultihopWorkers(t testing.TB,
	logger hclog.Logger,
	controllerContext context.Context,
	clusterAddrs []string,
	workerAuthKms wrapping.Wrapper,
	serversRepoFn common.ServersRepoFactory,
	pkiTags, childPkiTags, childKmsTags map[string][]string,
	enableAuthDebugging *atomic.Bool,
) (kmsWorker, pkiWorker, childPkiWorker, childKmsWorker *TestWorker) {
	require := require.New(t)

	// Create a few test wrappers for the child KMS worker to use. The pooled
	// wrapper is handed to both first-tier workers as their downstream
	// worker-auth KMS; the child KMS worker authenticates upward with
	// childDownstreamWrapper2 specifically.
	childDownstreamWrapper1 := db.TestWrapper(t)
	childDownstreamWrapper2 := db.TestWrapper(t)
	childDownstreamWrapper, err := multi.NewPooledWrapper(context.Background(), childDownstreamWrapper1)
	require.NoError(err)
	added, err := childDownstreamWrapper.AddWrapper(context.Background(), childDownstreamWrapper2)
	require.NoError(err)
	require.True(added)

	// First-tier PKI-KMS worker, directly upstream to the controller.
	kmsWorker = NewTestWorker(t, &TestWorkerOpts{
		WorkerAuthKms:              workerAuthKms,
		InitialUpstreams:           clusterAddrs,
		Logger:                     logger.Named("kmsWorker"),
		WorkerAuthDebuggingEnabled: enableAuthDebugging,
		DownstreamWorkerAuthKms:    childDownstreamWrapper,
	})
	t.Cleanup(kmsWorker.Shutdown)
	// Give time for it to be inserted into the database
	time.Sleep(2 * time.Second)

	// First-tier PKI worker. Names should not be set when using pki workers.
	pkiWorkerConf, err := config.DevWorker()
	require.NoError(err)
	pkiWorkerConf.Worker.Name = ""
	if pkiTags != nil {
		pkiWorkerConf.Worker.Tags = pkiTags
	}
	pkiWorkerConf.Worker.InitialUpstreams = clusterAddrs
	pkiWorker = NewTestWorker(t, &TestWorkerOpts{
		InitialUpstreams:           clusterAddrs,
		Logger:                     logger.Named("pkiWorker"),
		Config:                     pkiWorkerConf,
		DownstreamWorkerAuthKms:    childDownstreamWrapper,
		WorkerAuthDebuggingEnabled: enableAuthDebugging,
	})
	t.Cleanup(pkiWorker.Shutdown)
	// Give time for it to be inserted into the database
	time.Sleep(2 * time.Second)

	// Get a server repo and worker auth repo
	serversRepo, err := serversRepoFn()
	require.NoError(err)

	// Perform initial authentication of worker to controller: decode the
	// base58 registration request the PKI worker generated, then create the
	// corresponding worker record in the global scope.
	reqBytes, err := base58.FastBase58Decoding(pkiWorker.Worker().WorkerAuthRegistrationRequest)
	require.NoError(err)
	pkiWorkerReq := new(types.FetchNodeCredentialsRequest)
	require.NoError(proto.Unmarshal(reqBytes, pkiWorkerReq))
	_, err = serversRepo.CreateWorker(controllerContext, &server.Worker{
		Worker: &store.Worker{
			ScopeId: scope.Global.String(),
		},
	}, server.WithFetchNodeCredentialsRequest(pkiWorkerReq))
	require.NoError(err)

	// Child PKI worker, downstream of the first-tier PKI-KMS worker.
	childPkiWorkerConf, err := config.DevWorker()
	require.NoError(err)
	childPkiWorkerConf.Worker.Name = ""
	if childPkiTags != nil {
		childPkiWorkerConf.Worker.Tags = childPkiTags
	}
	childPkiWorkerConf.Worker.InitialUpstreams = kmsWorker.ProxyAddrs()
	childPkiWorker = NewTestWorker(t, &TestWorkerOpts{
		InitialUpstreams:           kmsWorker.ProxyAddrs(),
		Logger:                     logger.Named("childPkiWorker"),
		Config:                     childPkiWorkerConf,
		WorkerRecordingStoragePath: t.TempDir(),
		WorkerAuthDebuggingEnabled: enableAuthDebugging,
	})
	t.Cleanup(childPkiWorker.Shutdown)
	// Give time for it to be inserted into the database
	time.Sleep(2 * time.Second)

	// Perform initial authentication of the child PKI worker, same flow as
	// the first-tier PKI worker above.
	reqBytes, err = base58.FastBase58Decoding(childPkiWorker.Worker().WorkerAuthRegistrationRequest)
	require.NoError(err)
	childPkiWorkerReq := new(types.FetchNodeCredentialsRequest)
	require.NoError(proto.Unmarshal(reqBytes, childPkiWorkerReq))
	_, err = serversRepo.CreateWorker(controllerContext, &server.Worker{
		Worker: &store.Worker{
			ScopeId: scope.Global.String(),
		},
	}, server.WithFetchNodeCredentialsRequest(childPkiWorkerReq))
	require.NoError(err)

	// Child KMS worker, downstream of the first-tier PKI worker (per the
	// topology in the function comment: child PKI-KMS -> upstream PKI).
	childKmsWorkerConf, err := config.DevWorker()
	require.NoError(err)
	childKmsWorkerConf.Worker.Name = "child-kms-worker"
	childKmsWorkerConf.Worker.Description = "child-kms-worker description"
	// Set tags the same
	if childKmsTags != nil {
		childKmsWorkerConf.Worker.Tags = childKmsTags
	}
	// NOTE: previously this config pointed at kmsWorker.ProxyAddrs() while
	// the opts below pointed at pkiWorker.ProxyAddrs(); keep both consistent
	// with the documented topology (child KMS -> first-tier PKI worker).
	childKmsWorkerConf.Worker.InitialUpstreams = pkiWorker.ProxyAddrs()
	childKmsWorker = NewTestWorker(t, &TestWorkerOpts{
		InitialUpstreams:           pkiWorker.ProxyAddrs(),
		Logger:                     logger.Named("childKmsWorker"),
		Config:                     childKmsWorkerConf,
		WorkerAuthKms:              childDownstreamWrapper2,
		WorkerAuthDebuggingEnabled: enableAuthDebugging,
		DisableAutoStart:           true,
	})
	// Clear the auth-storage KMS before the manual start so the worker
	// authenticates purely via its worker-auth KMS.
	childKmsWorker.w.conf.WorkerAuthStorageKms = nil
	err = childKmsWorker.w.Start()
	// Register cleanup before checking the error so a partially started
	// worker is still shut down on failure.
	t.Cleanup(childKmsWorker.Shutdown)
	require.NoError(err)

	// Sleep so that workers can startup and connect.
	time.Sleep(12 * time.Second)

	return kmsWorker, pkiWorker, childPkiWorker, childKmsWorker
}
// NewAuthorizedPkiTestWorker creates a new test worker with the provided upstreams
// and creates it in the provided repo as an authorized worker. It returns
// The TestWorker and it's boundary id.

@ -10,7 +10,6 @@ import (
"crypto/x509"
"math/big"
"net"
"sync/atomic"
"testing"
"time"
@ -19,7 +18,6 @@ import (
"github.com/hashicorp/boundary/internal/daemon/worker/session"
"github.com/hashicorp/boundary/internal/db"
pbs "github.com/hashicorp/boundary/internal/gen/controller/servers/services"
"github.com/hashicorp/boundary/internal/server"
"github.com/hashicorp/boundary/sdk/pbs/controller/api/resources/targets"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
@ -151,64 +149,6 @@ func TestNewAuthorizedPkiTestWorker(t *testing.T) {
assert.Equal(t, "test", w.GetName())
}
// TestNewTestMultihopWorkers stands up the full four-worker multihop topology
// and asserts that every worker registers with the controller, reports a
// status time, carries its expected config tags, and continues to send status
// updates.
func TestNewTestMultihopWorkers(t *testing.T) {
	ctx := context.Background()
	logger := hclog.New(&hclog.LoggerOptions{
		Level: hclog.Trace,
	})
	conf, err := config.DevController()
	require.NoError(t, err)
	c := controller.NewTestController(t, &controller.TestControllerOpts{
		Config: conf,
		Logger: logger.Named("controller"),
	})
	// Shut the controller down when the test ends, consistent with the other
	// tests in this package.
	t.Cleanup(c.Shutdown)

	// Distinct tag sets so each tier can be told apart in assertions below.
	pkiTags := map[string][]string{"connected": {"directly"}}
	childPkiTags := map[string][]string{"connected": {"multihop"}}
	childKmsTags := map[string][]string{"connected": {"multihop"}}
	enableAuthDebugging := new(atomic.Bool)
	enableAuthDebugging.Store(true)

	kmsWorker, pkiWorker, childPkiWorker, childKmsWorker := NewTestMultihopWorkers(t, logger, c.Context(), c.ClusterAddrs(),
		c.Config().WorkerAuthKms, c.Controller().ServersRepoFn, pkiTags, childPkiTags, childKmsTags, enableAuthDebugging)

	srvRepo, err := c.Controller().ServersRepoFn()
	require.NoError(t, err)
	workers, err := srvRepo.ListWorkers(ctx, []string{"global"})
	// Check the error before inspecting the result; asserting on the slice
	// first would fail with a misleading length message when the list errors.
	require.NoError(t, err)
	assert.Len(t, workers, 4)

	// Match each repo record back to its TestWorker by proxy address.
	var kmsW, pkiW, childPkiW, childKmsW *server.Worker
	for _, w := range workers {
		switch w.GetAddress() {
		case kmsWorker.ProxyAddrs()[0]:
			kmsW = w
		case pkiWorker.ProxyAddrs()[0]:
			pkiW = w
		case childPkiWorker.ProxyAddrs()[0]:
			childPkiW = w
		case childKmsWorker.ProxyAddrs()[0]:
			childKmsW = w
		}
	}
	require.NotNil(t, kmsW)
	require.NotNil(t, pkiW)
	require.NotNil(t, childPkiW)
	require.NotNil(t, childKmsW)

	// Every worker should have reported status at least once...
	assert.NotZero(t, kmsW.GetLastStatusTime())
	assert.NotZero(t, pkiW.GetLastStatusTime())
	assert.NotZero(t, childPkiW.GetLastStatusTime())
	assert.NotZero(t, childKmsW.GetLastStatusTime())

	// ...and carry the tags passed into NewTestMultihopWorkers.
	assert.Equal(t, pkiTags, pkiW.GetConfigTags())
	assert.Equal(t, childPkiTags, childPkiW.GetConfigTags())
	assert.Equal(t, childKmsTags, childKmsW.GetConfigTags())

	// Each worker should keep sending status updates after startup.
	require.NoError(t, c.WaitForNextWorkerStatusUpdate(kmsWorker.Name()))
	require.NoError(t, c.WaitForNextWorkerStatusUpdate(pkiWorker.Name()))
	require.NoError(t, c.WaitForNextWorkerStatusUpdate(childPkiWorker.Name()))
	require.NoError(t, c.WaitForNextWorkerStatusUpdate(childKmsWorker.Name()))
}
func TestWorkerIPv6(t *testing.T) {
require, assert := require.New(t), assert.New(t)
w := NewTestWorker(t, &TestWorkerOpts{

@ -1,107 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package worker
import (
"context"
"testing"
"time"
"github.com/hashicorp/boundary/internal/cmd/config"
"github.com/hashicorp/boundary/internal/daemon/controller"
"github.com/hashicorp/boundary/internal/server"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/connectivity"
)
// TestDeleteConnectedWorkers verifies that deleting a worker's server record
// causes that worker's upstream gRPC connection to drop (reach Shutdown or
// TransientFailure), for a directly connected PKI worker and for both
// multihop child workers.
func TestDeleteConnectedWorkers(t *testing.T) {
	ctx := context.Background()
	logger := hclog.New(&hclog.LoggerOptions{
		Level: hclog.Trace,
	})
	conf, err := config.DevController()
	require.NoError(t, err)
	c := controller.NewTestController(t, &controller.TestControllerOpts{
		Config: conf,
		Logger: logger.Named("controller"),
	})
	// The first-tier KMS worker is not exercised here, only the PKI worker
	// and the two multihop children.
	_, directPkiWorker, multiHoppedPkiWorker, multiHoppedKmsWorker := NewTestMultihopWorkers(t, logger, c.Context(), c.ClusterAddrs(),
		c.Config().WorkerAuthKms, c.Controller().ServersRepoFn, nil, nil, nil, nil)
	serverRepo, err := c.Controller().ServersRepoFn()
	require.NoError(t, err)
	// Poll until all three workers appear in the server repo, bounded by a
	// generous 5-minute timeout (multihop registration is slow).
	workerTimeoutCtx, workerTimeoutCancel := context.WithTimeout(ctx, 5*time.Minute)
	defer workerTimeoutCancel()
	var pkiWorker, childPkiWorker, childKmsWorker *server.Worker
	for pkiWorker == nil || childPkiWorker == nil || childKmsWorker == nil {
		time.Sleep(time.Second)
		select {
		case <-workerTimeoutCtx.Done():
			require.FailNow(t, "timeout waiting for all workers to connect")
		default:
			// WithLiveness(-1) disables the liveness filter so even workers
			// that have not reported status recently are listed.
			workers, err := serverRepo.ListWorkers(workerTimeoutCtx, []string{"global"}, server.WithLiveness(-1))
			require.NoError(t, err)
			// Match repo records to test workers by proxy address. Note the
			// child KMS worker is matched with Type == "pki" here as well —
			// presumably its record reports that type; verify against the
			// server package if this ever fails.
			for _, w := range workers {
				if w.Type == "pki" && w.GetAddress() == multiHoppedPkiWorker.ProxyAddrs()[0] {
					childPkiWorker = w
				}
				if w.Type == "pki" && w.GetAddress() == multiHoppedKmsWorker.ProxyAddrs()[0] {
					childKmsWorker = w
				}
				if w.Type == "pki" && w.GetAddress() == directPkiWorker.ProxyAddrs()[0] {
					pkiWorker = w
				}
			}
		}
	}
	// Each case pairs a repo worker id (to delete) with the live TestWorker
	// whose upstream connection should subsequently fail.
	cases := []struct {
		name       string
		workerId   string
		testWorker *TestWorker
	}{
		{
			name:       "multi hop pki worker",
			workerId:   childPkiWorker.GetPublicId(),
			testWorker: multiHoppedPkiWorker,
		},
		{
			name:       "multi hop kms worker",
			workerId:   childKmsWorker.GetPublicId(),
			testWorker: multiHoppedKmsWorker,
		},
		{
			name:       "directly connected worker",
			workerId:   pkiWorker.GetPublicId(),
			testWorker: directPkiWorker,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Sanity-check the connection is healthy before the delete.
			prevState := tc.testWorker.Worker().GrpcClientConn.Load().GetState()
			require.NotEqual(t, connectivity.TransientFailure, prevState)
			require.NotEqual(t, connectivity.Shutdown, prevState)
			_, err = serverRepo.DeleteWorker(ctx, tc.workerId)
			require.NoError(t, err)
			stateChangeCtx, cancel := context.WithTimeout(ctx, 4*time.Second)
			defer cancel()
			// Walk the gRPC connectivity state machine until the connection
			// reaches a failed/terminal state or the 4s window expires.
			// ResetConnectBackoff forces immediate reconnect attempts so the
			// state transitions happen within the window.
			for {
				tc.testWorker.Worker().GrpcClientConn.Load().ResetConnectBackoff()
				if !tc.testWorker.Worker().GrpcClientConn.Load().WaitForStateChange(stateChangeCtx, prevState) {
					assert.Fail(t, "State didn't change before context timed out")
					break
				}
				newState := tc.testWorker.Worker().GrpcClientConn.Load().GetState()
				t.Logf("Changed from previous state: %s to new state: %s", prevState, newState)
				if newState == connectivity.Shutdown || newState == connectivity.TransientFailure {
					break
				}
				prevState = newState
			}
		})
	}
}
Loading…
Cancel
Save