Target pagination e2e test (#4010)

* testing/e2e: Use a separate config file for database init

The other config file contains templated parameters, which were causing
errors when loading the config file, even though the values were never
used.

* testing/e2e: add target pagination test

Exercises target pagination both over the CLI and the API. These tests
run in <10s on my machine, though they each involve creating 1000
targets (with current defaults).

---------

Co-authored-by: Michael Li <michael.li@hashicorp.com>
pull/4202/head
Johan Brandhorst-Satzkorn 2 years ago
parent acc048a1d1
commit 6198fe279e

@ -132,6 +132,7 @@ scenario "e2e_docker_base_plus" {
ldap_user_name = step.create_ldap_server.user_name
ldap_user_password = step.create_ldap_server.user_password
ldap_group_name = step.create_ldap_server.group_name
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -138,6 +138,7 @@ scenario "e2e_docker_base_with_vault" {
vault_addr_internal = step.create_vault.address_internal
vault_root_token = step.create_vault.token
vault_port = step.create_vault.port
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -183,6 +183,7 @@ scenario "e2e_docker_base_with_worker" {
vault_port = step.create_vault.port
worker_tag_egress = local.egress_tag
worker_tag_collocated = step.create_boundary.worker_tag
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -122,6 +122,7 @@ scenario "e2e_docker_base" {
target_address = step.create_host.address
target_port = step.create_host.port
target_user = "ubuntu"
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -198,6 +198,7 @@ scenario "e2e_docker_worker_registration_controller_led" {
vault_port = step.create_vault.port
worker_tag_egress = local.egress_tag
worker_tag_collocated = step.create_boundary.worker_tag
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -213,6 +213,7 @@ scenario "e2e_docker_worker_registration_worker_led" {
vault_port = step.create_vault.port
worker_tag_egress = local.egress_tag
worker_tag_collocated = step.create_boundary.worker_tag
max_page_size = step.create_boundary.max_page_size
}
}
}

@ -6,6 +6,8 @@ disable_mlock = true
controller {
name = "docker-controller"
max_page_size = ${max_page_size}
database {
url = "env://BOUNDARY_POSTGRES_URL"
}

@ -0,0 +1,53 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
disable_mlock = true
controller {
name = "docker-controller"
database {
url = "env://BOUNDARY_POSTGRES_URL"
}
}
kms "aead" {
purpose = "root"
aead_type = "aes-gcm"
key = "sP1fnF5Xz85RrXyELHFeZg9Ad2qt4Z4bgNHVGtD6ung="
key_id = "global_root"
}
# This key_id needs to match the corresponding downstream worker's
# "worker-auth" kms
kms "aead" {
purpose = "worker-auth"
aead_type = "aes-gcm"
key = "OLFhJNbEb3umRjdhY15QKNEmNXokY1Iq"
key_id = "global_worker-auth"
}
kms "aead" {
purpose = "recovery"
aead_type = "aes-gcm"
key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
key_id = "global_recovery"
}
events {
audit_enabled = true
observations_enabled = true
sysevents_enabled = true
sink "stderr" {
name = "all-events"
description = "All events sent to stderr"
event_types = ["*"]
format = "cloudevents-json"
deny_filters = [
"\"/data/request_info/method\" contains \"Status\"",
"\"/data/request_info/path\" contains \"/health\"",
]
}
}

@ -6,6 +6,8 @@ disable_mlock = true
controller {
name = "docker-controller"
max_page_size = ${max_page_size}
database {
url = "env://BOUNDARY_POSTGRES_URL"
}

@ -54,6 +54,11 @@ variable "worker_tag" {
type = string
default = "collocated"
}
variable "max_page_size" {
description = "Max allowed page size for pagination requests"
type = number
default = 10
}
resource "docker_image" "boundary" {
name = var.image_name
@ -66,7 +71,7 @@ resource "enos_local_exec" "init_database" {
TEST_DATABASE_ADDRESS = var.postgres_address
TEST_DATABASE_NETWORK = var.database_network
TEST_BOUNDARY_LICENSE = var.boundary_license
CONFIG = "${abspath(path.module)}/${var.config_file}"
CONFIG = "${abspath(path.module)}/boundary-config-init.hcl"
}
inline = ["bash ./${path.module}/init.sh"]
}
@ -114,7 +119,8 @@ resource "docker_container" "boundary" {
upload {
content = templatefile("${abspath(path.module)}/${var.config_file}", {
worker_type_tag = var.worker_tag
worker_type_tag = var.worker_tag,
max_page_size = var.max_page_size
})
file = "/boundary/boundary-config.hcl"
}
@ -173,3 +179,7 @@ output "password" {
output "worker_tag" {
value = var.worker_tag
}
output "max_page_size" {
value = var.max_page_size
}

@ -175,6 +175,10 @@ variable "worker_tag_collocated" {
type = string
default = ""
}
variable "max_page_size" {
description = "Max allowed page size for pagination requests"
type = number
}
variable "postgres_user" {
description = "Username for accessing the postgres database"
type = string
@ -294,6 +298,7 @@ resource "enos_local_exec" "run_e2e_test" {
E2E_LDAP_USER_NAME = var.ldap_user_name
E2E_LDAP_USER_PASSWORD = var.ldap_user_password
E2E_LDAP_GROUP_NAME = var.ldap_group_name
E2E_MAX_PAGE_SIZE = var.max_page_size
BOUNDARY_DIR = abspath(var.local_boundary_src_dir)
BOUNDARY_CLI_DIR = abspath(var.local_boundary_dir)
MODULE_DIR = abspath(path.module)

@ -46,6 +46,7 @@ docker run \
-e "E2E_LDAP_USER_NAME=$E2E_LDAP_USER_NAME" \
-e "E2E_LDAP_USER_PASSWORD=$E2E_LDAP_USER_PASSWORD" \
-e "E2E_LDAP_GROUP_NAME=$E2E_LDAP_GROUP_NAME" \
-e "E2E_MAX_PAGE_SIZE=$E2E_MAX_PAGE_SIZE" \
--mount type=bind,src=$BOUNDARY_DIR,dst=/src/boundary/ \
--mount type=bind,src=$MODULE_DIR/../..,dst=/testlogs \
--mount type=bind,src=$(go env GOCACHE),dst=/root/.cache/go-build \

@ -43,7 +43,8 @@ export E2E_PASSWORD_AUTH_METHOD_ID= # e.g. ampw_1234567890
export E2E_PASSWORD_ADMIN_LOGIN_NAME= # e.g. "admin"
export E2E_PASSWORD_ADMIN_PASSWORD= # e.g. "password"
export E2E_TARGET_IP= # e.g. 192.168.0.1
export E2E_TARGET_ADDRESS= # e.g. 192.168.0.1
export E2E_TARGET_PORT= # e.g. 22
export E2E_SSH_KEY_PATH= # e.g. /Users/username/key.pem
export E2E_SSH_USER= # e.g. ubuntu
@ -55,7 +56,8 @@ export E2E_PASSWORD_ADMIN_PASSWORD= # e.g. "password"
export VAULT_ADDR= # e.g. http://127.0.0.1:8200
export VAULT_TOKEN=
export E2E_TARGET_IP= # e.g. 192.168.0.1
export E2E_TARGET_ADDRESS= # e.g. 192.168.0.1
export E2E_TARGET_PORT= # e.g. 22
export E2E_SSH_KEY_PATH= # e.g. /Users/username/key.pem
export E2E_SSH_USER= # e.g. ubuntu

@ -10,6 +10,7 @@ type config struct {
TargetSshKeyPath string `envconfig:"E2E_SSH_KEY_PATH" required:"true"` // e.g. /Users/username/key.pem
TargetSshUser string `envconfig:"E2E_SSH_USER" required:"true"` // e.g. ubuntu
TargetPort string `envconfig:"E2E_TARGET_PORT" required:"true"`
MaxPageSize int `envconfig:"E2E_MAX_PAGE_SIZE" default:"1000"`
}
func loadTestConfig() (*config, error) {

@ -0,0 +1,198 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package base_test
import (
"context"
"encoding/json"
"slices"
"strconv"
"testing"
"github.com/hashicorp/boundary/api/scopes"
"github.com/hashicorp/boundary/api/targets"
"github.com/hashicorp/boundary/internal/target"
"github.com/hashicorp/boundary/testing/internal/e2e"
"github.com/hashicorp/boundary/testing/internal/e2e/boundary"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestApiPaginateTargets asserts that the API automatically paginates to retrieve
// all targets in a single invocation.
func TestApiPaginateTargets(t *testing.T) {
e2e.MaybeSkipTest(t)
c, err := loadTestConfig()
require.NoError(t, err)
client, err := boundary.NewApiClient()
require.NoError(t, err)
ctx := context.Background()
sClient := scopes.NewClient(client)
tClient := targets.NewClient(client)
newOrgId := boundary.CreateNewOrgApi(t, ctx, client)
t.Cleanup(func() {
ctx := context.Background()
_, err := sClient.Delete(ctx, newOrgId)
require.NoError(t, err)
})
newProjectId := boundary.CreateNewProjectApi(t, ctx, client, newOrgId)
// Create enough targets to overflow a single page.
targetPort, err := strconv.ParseInt(c.TargetPort, 10, 32)
require.NoError(t, err)
for i := 0; i < c.MaxPageSize+1; i++ {
_, err := tClient.Create(ctx, "tcp", newProjectId,
targets.WithName("test-target-"+strconv.Itoa(i)),
targets.WithTcpTargetDefaultPort(uint32(targetPort)),
targets.WithAddress(c.TargetAddress),
)
require.NoError(t, err)
}
// List targets recursively
initialTargets, err := tClient.List(ctx, newProjectId)
require.NoError(t, err)
require.Len(t, initialTargets.Items, c.MaxPageSize+1)
assert.Equal(t, "complete", initialTargets.ResponseType)
assert.Empty(t, initialTargets.RemovedIds)
assert.NotEmpty(t, initialTargets.ListToken)
mapItems, ok := initialTargets.GetResponse().Map["items"]
require.True(t, ok)
mapSliceItems, ok := mapItems.([]any)
require.True(t, ok)
assert.Len(t, mapSliceItems, c.MaxPageSize+1)
// Create a new target and destroy one of the other targets
newTargetResult, err := tClient.Create(ctx, "tcp", newProjectId,
targets.WithName("test-target-"+strconv.Itoa(c.MaxPageSize+1)),
targets.WithTcpTargetDefaultPort(uint32(targetPort)),
targets.WithAddress(c.TargetAddress),
)
require.NoError(t, err)
_, err = tClient.Delete(ctx, initialTargets.Items[0].Id)
require.NoError(t, err)
// List again, should have the new and deleted target
newTargets, err := tClient.List(ctx, newProjectId, targets.WithListToken(initialTargets.ListToken))
require.NoError(t, err)
// Note that this will likely contain all the targets,
// since they were created very shortly before the listing,
// and we add a 30 second buffer to the lower bound of update
// times when listing.
require.GreaterOrEqual(t, len(newTargets.Items), 1)
// The first item should be the most recently created, which
// should be our new target
firstItem := newTargets.Items[0]
assert.Equal(t, newTargetResult.Item.Id, firstItem.Id)
assert.Equal(t, "complete", newTargets.ResponseType)
// Note that the removed IDs may contain entries from other tests,
// so just check that there is at least 1 entry and that our entry
// is somewhere in the list.
require.GreaterOrEqual(t, len(newTargets.RemovedIds), 1)
assert.True(t, slices.ContainsFunc(newTargets.RemovedIds, func(targetId string) bool {
return targetId == initialTargets.Items[0].Id
}))
assert.NotEmpty(t, newTargets.ListToken)
// Check that the response map contains all entries
mapItems, ok = newTargets.GetResponse().Map["items"]
require.True(t, ok)
mapSliceItems, ok = mapItems.([]any)
require.True(t, ok)
assert.GreaterOrEqual(t, len(mapSliceItems), 1)
}
Loading…
Cancel
Save