From 6198fe279e84c880a3b8c23140333efe46c81ee5 Mon Sep 17 00:00:00 2001 From: Johan Brandhorst-Satzkorn Date: Wed, 29 Nov 2023 14:17:34 -0800 Subject: [PATCH] Target pagination e2e test (#4010) * testing/e2e: Use a separate config file for database init The other config file contains templated parameters, which was causing errors when loading the config file, even though the values were never used. * testing/e2e: add target pagination test Exercises target pagination both over the CLI and the API. These tests run in <10s on my machine, though they each involve creating 1000 targets (with current defaults). --------- Co-authored-by: Michael Li --- enos/enos-scenario-e2e-docker-base-plus.hcl | 1 + ...os-scenario-e2e-docker-base-with-vault.hcl | 1 + ...s-scenario-e2e-docker-base-with-worker.hcl | 1 + enos/enos-scenario-e2e-docker-base.hcl | 1 + ...ker-worker-registration-controller-led.hcl | 1 + ...-docker-worker-registration-worker-led.hcl | 1 + .../docker_boundary/boundary-config-bsr.hcl | 2 + .../docker_boundary/boundary-config-init.hcl | 53 +++++ .../docker_boundary/boundary-config.hcl | 2 + enos/modules/docker_boundary/main.tf | 14 +- enos/modules/test_e2e_docker/main.tf | 5 + enos/modules/test_e2e_docker/test_runner.sh | 1 + testing/internal/e2e/README.md | 6 +- testing/internal/e2e/tests/base/env_test.go | 1 + .../e2e/tests/base/paginate_target_test.go | 198 ++++++++++++++++++ 15 files changed, 284 insertions(+), 4 deletions(-) create mode 100644 enos/modules/docker_boundary/boundary-config-init.hcl create mode 100644 testing/internal/e2e/tests/base/paginate_target_test.go diff --git a/enos/enos-scenario-e2e-docker-base-plus.hcl b/enos/enos-scenario-e2e-docker-base-plus.hcl index 3e82365758..5b737d9453 100644 --- a/enos/enos-scenario-e2e-docker-base-plus.hcl +++ b/enos/enos-scenario-e2e-docker-base-plus.hcl @@ -132,6 +132,7 @@ scenario "e2e_docker_base_plus" { ldap_user_name = step.create_ldap_server.user_name ldap_user_password = 
step.create_ldap_server.user_password ldap_group_name = step.create_ldap_server.group_name + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/enos-scenario-e2e-docker-base-with-vault.hcl b/enos/enos-scenario-e2e-docker-base-with-vault.hcl index b6902cfae0..48ffac9c40 100644 --- a/enos/enos-scenario-e2e-docker-base-with-vault.hcl +++ b/enos/enos-scenario-e2e-docker-base-with-vault.hcl @@ -138,6 +138,7 @@ scenario "e2e_docker_base_with_vault" { vault_addr_internal = step.create_vault.address_internal vault_root_token = step.create_vault.token vault_port = step.create_vault.port + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/enos-scenario-e2e-docker-base-with-worker.hcl b/enos/enos-scenario-e2e-docker-base-with-worker.hcl index 19122c56a9..fb569a574f 100644 --- a/enos/enos-scenario-e2e-docker-base-with-worker.hcl +++ b/enos/enos-scenario-e2e-docker-base-with-worker.hcl @@ -183,6 +183,7 @@ scenario "e2e_docker_base_with_worker" { vault_port = step.create_vault.port worker_tag_egress = local.egress_tag worker_tag_collocated = step.create_boundary.worker_tag + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/enos-scenario-e2e-docker-base.hcl b/enos/enos-scenario-e2e-docker-base.hcl index 7a799d4302..d56ff61b23 100644 --- a/enos/enos-scenario-e2e-docker-base.hcl +++ b/enos/enos-scenario-e2e-docker-base.hcl @@ -122,6 +122,7 @@ scenario "e2e_docker_base" { target_address = step.create_host.address target_port = step.create_host.port target_user = "ubuntu" + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/enos-scenario-e2e-docker-worker-registration-controller-led.hcl b/enos/enos-scenario-e2e-docker-worker-registration-controller-led.hcl index 64aec5e818..25bfb1f00f 100644 --- a/enos/enos-scenario-e2e-docker-worker-registration-controller-led.hcl +++ b/enos/enos-scenario-e2e-docker-worker-registration-controller-led.hcl @@ -198,6 +198,7 @@ scenario 
"e2e_docker_worker_registration_controller_led" { vault_port = step.create_vault.port worker_tag_egress = local.egress_tag worker_tag_collocated = step.create_boundary.worker_tag + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/enos-scenario-e2e-docker-worker-registration-worker-led.hcl b/enos/enos-scenario-e2e-docker-worker-registration-worker-led.hcl index 9a5ae0a052..21e83dcf76 100644 --- a/enos/enos-scenario-e2e-docker-worker-registration-worker-led.hcl +++ b/enos/enos-scenario-e2e-docker-worker-registration-worker-led.hcl @@ -213,6 +213,7 @@ scenario "e2e_docker_worker_registration_worker_led" { vault_port = step.create_vault.port worker_tag_egress = local.egress_tag worker_tag_collocated = step.create_boundary.worker_tag + max_page_size = step.create_boundary.max_page_size } } } diff --git a/enos/modules/docker_boundary/boundary-config-bsr.hcl b/enos/modules/docker_boundary/boundary-config-bsr.hcl index 93a2e4fd07..2ba20d919c 100644 --- a/enos/modules/docker_boundary/boundary-config-bsr.hcl +++ b/enos/modules/docker_boundary/boundary-config-bsr.hcl @@ -6,6 +6,8 @@ disable_mlock = true controller { name = "docker-controller" + max_page_size = ${max_page_size} + database { url = "env://BOUNDARY_POSTGRES_URL" } diff --git a/enos/modules/docker_boundary/boundary-config-init.hcl b/enos/modules/docker_boundary/boundary-config-init.hcl new file mode 100644 index 0000000000..7361c59e31 --- /dev/null +++ b/enos/modules/docker_boundary/boundary-config-init.hcl @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +disable_mlock = true + +controller { + name = "docker-controller" + + database { + url = "env://BOUNDARY_POSTGRES_URL" + } +} + +kms "aead" { + purpose = "root" + aead_type = "aes-gcm" + key = "sP1fnF5Xz85RrXyELHFeZg9Ad2qt4Z4bgNHVGtD6ung=" + key_id = "global_root" +} + +# This key_id needs to match the corresponding downstream worker's +# "worker-auth" kms +kms "aead" { + purpose = "worker-auth" + aead_type = "aes-gcm" + key = "OLFhJNbEb3umRjdhY15QKNEmNXokY1Iq" + key_id = "global_worker-auth" +} + +kms "aead" { + purpose = "recovery" + aead_type = "aes-gcm" + key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ=" + key_id = "global_recovery" +} + +events { + audit_enabled = true + observations_enabled = true + sysevents_enabled = true + + sink "stderr" { + name = "all-events" + description = "All events sent to stderr" + event_types = ["*"] + format = "cloudevents-json" + + deny_filters = [ + "\"/data/request_info/method\" contains \"Status\"", + "\"/data/request_info/path\" contains \"/health\"", + ] + } +} diff --git a/enos/modules/docker_boundary/boundary-config.hcl b/enos/modules/docker_boundary/boundary-config.hcl index 3f40bbe471..416e8e0baa 100644 --- a/enos/modules/docker_boundary/boundary-config.hcl +++ b/enos/modules/docker_boundary/boundary-config.hcl @@ -6,6 +6,8 @@ disable_mlock = true controller { name = "docker-controller" + max_page_size = ${max_page_size} + database { url = "env://BOUNDARY_POSTGRES_URL" } diff --git a/enos/modules/docker_boundary/main.tf b/enos/modules/docker_boundary/main.tf index d12b352aec..915198b337 100644 --- a/enos/modules/docker_boundary/main.tf +++ b/enos/modules/docker_boundary/main.tf @@ -54,6 +54,11 @@ variable "worker_tag" { type = string default = "collocated" } +variable "max_page_size" { + description = "Max allowed page size for pagination requests" + type = number + default = 10 +} resource "docker_image" "boundary" { name = var.image_name @@ -66,7 +71,7 @@ resource 
"enos_local_exec" "init_database" { TEST_DATABASE_ADDRESS = var.postgres_address TEST_DATABASE_NETWORK = var.database_network TEST_BOUNDARY_LICENSE = var.boundary_license - CONFIG = "${abspath(path.module)}/${var.config_file}" + CONFIG = "${abspath(path.module)}/boundary-config-init.hcl" } inline = ["bash ./${path.module}/init.sh"] } @@ -114,7 +119,8 @@ resource "docker_container" "boundary" { upload { content = templatefile("${abspath(path.module)}/${var.config_file}", { - worker_type_tag = var.worker_tag + worker_type_tag = var.worker_tag, + max_page_size = var.max_page_size }) file = "/boundary/boundary-config.hcl" } @@ -173,3 +179,7 @@ output "password" { output "worker_tag" { value = var.worker_tag } + +output "max_page_size" { + value = var.max_page_size +} diff --git a/enos/modules/test_e2e_docker/main.tf b/enos/modules/test_e2e_docker/main.tf index 0563315485..df76429541 100644 --- a/enos/modules/test_e2e_docker/main.tf +++ b/enos/modules/test_e2e_docker/main.tf @@ -175,6 +175,10 @@ variable "worker_tag_collocated" { type = string default = "" } +variable "max_page_size" { + description = "Max allowed page size for pagination requests" + type = number +} variable "postgres_user" { description = "Username for accessing the postgres database" type = string @@ -294,6 +298,7 @@ resource "enos_local_exec" "run_e2e_test" { E2E_LDAP_USER_NAME = var.ldap_user_name E2E_LDAP_USER_PASSWORD = var.ldap_user_password E2E_LDAP_GROUP_NAME = var.ldap_group_name + E2E_MAX_PAGE_SIZE = var.max_page_size BOUNDARY_DIR = abspath(var.local_boundary_src_dir) BOUNDARY_CLI_DIR = abspath(var.local_boundary_dir) MODULE_DIR = abspath(path.module) diff --git a/enos/modules/test_e2e_docker/test_runner.sh b/enos/modules/test_e2e_docker/test_runner.sh index 2f8baf0965..ddae0f319d 100644 --- a/enos/modules/test_e2e_docker/test_runner.sh +++ b/enos/modules/test_e2e_docker/test_runner.sh @@ -46,6 +46,7 @@ docker run \ -e "E2E_LDAP_USER_NAME=$E2E_LDAP_USER_NAME" \ -e 
"E2E_LDAP_USER_PASSWORD=$E2E_LDAP_USER_PASSWORD" \ -e "E2E_LDAP_GROUP_NAME=$E2E_LDAP_GROUP_NAME" \ + -e "E2E_MAX_PAGE_SIZE=$E2E_MAX_PAGE_SIZE" \ --mount type=bind,src=$BOUNDARY_DIR,dst=/src/boundary/ \ --mount type=bind,src=$MODULE_DIR/../..,dst=/testlogs \ --mount type=bind,src=$(go env GOCACHE),dst=/root/.cache/go-build \ diff --git a/testing/internal/e2e/README.md b/testing/internal/e2e/README.md index c3aee25459..77dc88e678 100644 --- a/testing/internal/e2e/README.md +++ b/testing/internal/e2e/README.md @@ -43,7 +43,8 @@ export E2E_PASSWORD_AUTH_METHOD_ID= # e.g. ampw_1234567890 export E2E_PASSWORD_ADMIN_LOGIN_NAME= # e.g. "admin" export E2E_PASSWORD_ADMIN_PASSWORD= # e.g. "password" -export E2E_TARGET_IP= # e.g. 192.168.0.1 +export E2E_TARGET_ADDRESS= # e.g. 192.168.0.1 +export E2E_TARGET_PORT= # e.g. 22 export E2E_SSH_KEY_PATH= # e.g. /Users/username/key.pem export E2E_SSH_USER= # e.g. ubuntu @@ -55,7 +56,8 @@ export E2E_PASSWORD_ADMIN_PASSWORD= # e.g. "password" export VAULT_ADDR= # e.g. http://127.0.0.1:8200 export VAULT_TOKEN= -export E2E_TARGET_IP= # e.g. 192.168.0.1 +export E2E_TARGET_ADDRESS= # e.g. 192.168.0.1 +export E2E_TARGET_PORT= # e.g. 22 export E2E_SSH_KEY_PATH= # e.g. /Users/username/key.pem export E2E_SSH_USER= # e.g. ubuntu diff --git a/testing/internal/e2e/tests/base/env_test.go b/testing/internal/e2e/tests/base/env_test.go index dc6dc6cdf5..5b9d7f57a9 100644 --- a/testing/internal/e2e/tests/base/env_test.go +++ b/testing/internal/e2e/tests/base/env_test.go @@ -10,6 +10,7 @@ type config struct { TargetSshKeyPath string `envconfig:"E2E_SSH_KEY_PATH" required:"true"` // e.g. /Users/username/key.pem TargetSshUser string `envconfig:"E2E_SSH_USER" required:"true"` // e.g. 
ubuntu TargetPort string `envconfig:"E2E_TARGET_PORT" required:"true"` + MaxPageSize int `envconfig:"E2E_MAX_PAGE_SIZE" default:"1000"` } func loadTestConfig() (*config, error) { diff --git a/testing/internal/e2e/tests/base/paginate_target_test.go b/testing/internal/e2e/tests/base/paginate_target_test.go new file mode 100644 index 0000000000..c067e22e33 --- /dev/null +++ b/testing/internal/e2e/tests/base/paginate_target_test.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package base_test + +import ( + "context" + "encoding/json" + "slices" + "strconv" + "testing" + + "github.com/hashicorp/boundary/api/scopes" + "github.com/hashicorp/boundary/api/targets" + "github.com/hashicorp/boundary/internal/target" + "github.com/hashicorp/boundary/testing/internal/e2e" + "github.com/hashicorp/boundary/testing/internal/e2e/boundary" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestCliPaginateTargets asserts that the CLI automatically paginates to retrieve +// all targets in a single invocation. +func TestCliPaginateTargets(t *testing.T) { + e2e.MaybeSkipTest(t) + c, err := loadTestConfig() + require.NoError(t, err) + + ctx := context.Background() + boundary.AuthenticateAdminCli(t, ctx) + newOrgId := boundary.CreateNewOrgCli(t, ctx) + t.Cleanup(func() { + ctx := context.Background() + boundary.AuthenticateAdminCli(t, ctx) + output := e2e.RunCommand(ctx, "boundary", e2e.WithArgs("scopes", "delete", "-id", newOrgId)) + require.NoError(t, output.Err, string(output.Stderr)) + }) + newProjectId := boundary.CreateNewProjectCli(t, ctx, newOrgId) + + // Create enough targets to overflow a single page. + // Use the API to make creation faster. 
+ client, err := boundary.NewApiClient() + require.NoError(t, err) + tClient := targets.NewClient(client) + targetPort, err := strconv.ParseInt(c.TargetPort, 10, 32) + require.NoError(t, err) + for i := 0; i < c.MaxPageSize+1; i++ { + _, err := tClient.Create(ctx, "tcp", newProjectId, + targets.WithName("test-target-"+strconv.Itoa(i)), + targets.WithTcpTargetDefaultPort(uint32(targetPort)), + targets.WithAddress(c.TargetAddress), + ) + require.NoError(t, err) + } + + // List targets recursively + output := e2e.RunCommand(ctx, "boundary", + e2e.WithArgs( + "targets", "list", + "-scope-id", newProjectId, + "-format=json", + ), + ) + require.NoError(t, output.Err, string(output.Stderr)) + + var initialTargets targets.TargetListResult + err = json.Unmarshal(output.Stdout, &initialTargets) + require.NoError(t, err) + + require.Len(t, initialTargets.Items, c.MaxPageSize+1) + // Note that none of these are returned to the CLI for now. + assert.Empty(t, initialTargets.ResponseType) + assert.Empty(t, initialTargets.RemovedIds) + assert.Empty(t, initialTargets.ListToken) + + // Create a new target and destroy one of the other targets + newTargetId := boundary.CreateNewTargetCli(t, ctx, newProjectId, c.TargetPort, target.WithAddress(c.TargetAddress)) + output = e2e.RunCommand(ctx, "boundary", + e2e.WithArgs( + "targets", "delete", + "-id", initialTargets.Items[0].Id, + ), + ) + require.NoError(t, output.Err, string(output.Stderr)) + + // List again, should have the new target but not the deleted target + output = e2e.RunCommand(ctx, "boundary", + e2e.WithArgs( + "targets", "list", + "-scope-id", newProjectId, + "-format=json", + ), + ) + require.NoError(t, output.Err, string(output.Stderr)) + + var newTargets targets.TargetListResult + err = json.Unmarshal(output.Stdout, &newTargets) + require.NoError(t, err) + + require.Len(t, newTargets.Items, c.MaxPageSize+1) + // The first item should be the most recently created, which + // should be our new target + firstItem := 
newTargets.Items[0] + assert.Equal(t, newTargetId, firstItem.Id) + assert.Empty(t, newTargets.ResponseType) + assert.Empty(t, newTargets.RemovedIds) + assert.Empty(t, newTargets.ListToken) + // Ensure the deleted target isn't returned + for _, target := range newTargets.Items { + assert.NotEqual(t, target.Id, initialTargets.Items[0].Id) + } +} + +// TestApiPaginateTargets asserts that the API automatically paginates to retrieve +// all targets in a single invocation. +func TestApiPaginateTargets(t *testing.T) { + e2e.MaybeSkipTest(t) + c, err := loadTestConfig() + require.NoError(t, err) + + client, err := boundary.NewApiClient() + require.NoError(t, err) + ctx := context.Background() + sClient := scopes.NewClient(client) + tClient := targets.NewClient(client) + newOrgId := boundary.CreateNewOrgApi(t, ctx, client) + t.Cleanup(func() { + ctx := context.Background() + _, err := sClient.Delete(ctx, newOrgId) + require.NoError(t, err) + }) + newProjectId := boundary.CreateNewProjectApi(t, ctx, client, newOrgId) + + // Create enough targets to overflow a single page. 
+ targetPort, err := strconv.ParseInt(c.TargetPort, 10, 32) + require.NoError(t, err) + for i := 0; i < c.MaxPageSize+1; i++ { + _, err := tClient.Create(ctx, "tcp", newProjectId, + targets.WithName("test-target-"+strconv.Itoa(i)), + targets.WithTcpTargetDefaultPort(uint32(targetPort)), + targets.WithAddress(c.TargetAddress), + ) + require.NoError(t, err) + } + + // List targets recursively + initialTargets, err := tClient.List(ctx, newProjectId) + require.NoError(t, err) + + require.Len(t, initialTargets.Items, c.MaxPageSize+1) + assert.Equal(t, "complete", initialTargets.ResponseType) + assert.Empty(t, initialTargets.RemovedIds) + assert.NotEmpty(t, initialTargets.ListToken) + mapItems, ok := initialTargets.GetResponse().Map["items"] + require.True(t, ok) + mapSliceItems, ok := mapItems.([]any) + require.True(t, ok) + assert.Len(t, mapSliceItems, c.MaxPageSize+1) + + // Create a new target and destroy one of the other targets + newTargetResult, err := tClient.Create(ctx, "tcp", newProjectId, + targets.WithName("test-target-"+strconv.Itoa(c.MaxPageSize+1)), + targets.WithTcpTargetDefaultPort(uint32(targetPort)), + targets.WithAddress(c.TargetAddress), + ) + require.NoError(t, err) + _, err = tClient.Delete(ctx, initialTargets.Items[0].Id) + require.NoError(t, err) + + // List again, should have the new and deleted target + newTargets, err := tClient.List(ctx, newProjectId, targets.WithListToken(initialTargets.ListToken)) + require.NoError(t, err) + + // Note that this will likely contain all the targets, + // since they were created very shortly before the listing, + // and we add a 30 second buffer to the lower bound of update + // times when listing. 
+ require.GreaterOrEqual(t, len(newTargets.Items), 1) + // The first item should be the most recently created, which + // should be our new target + firstItem := newTargets.Items[0] + assert.Equal(t, newTargetResult.Item.Id, firstItem.Id) + assert.Equal(t, "complete", newTargets.ResponseType) + // Note that the removed IDs may contain entries from other tests, + // so just check that there is at least 1 entry and that our entry + // is somewhere in the list. + require.GreaterOrEqual(t, len(newTargets.RemovedIds), 1) + assert.True(t, slices.ContainsFunc(newTargets.RemovedIds, func(targetId string) bool { + return targetId == initialTargets.Items[0].Id + })) + assert.NotEmpty(t, newTargets.ListToken) + // Check that the response map contains all entries + mapItems, ok = newTargets.GetResponse().Map["items"] + require.True(t, ok) + mapSliceItems, ok = mapItems.([]any) + require.True(t, ok) + assert.GreaterOrEqual(t, len(mapSliceItems), 1) +}