mirror of https://github.com/hashicorp/boundary
This adds the initial set of tests that validate setting up a dynamic host catalog on AWS Co-authored-by: Josh Brand <jbrand@hashicorp.com>pull/2506/head
parent
da885a89c3
commit
da0fb9d38a
@ -0,0 +1,192 @@
|
||||
# This scenario requires access to the boundary team's test AWS account
scenario "e2e_host_aws" {
  terraform_cli = terraform_cli.default
  terraform     = terraform.default
  providers = [
    provider.aws.default,
    provider.enos.default
  ]

  # Run once with a locally built boundary binary and once with a CRT
  # (release pipeline) artifact.
  matrix {
    builder = ["local", "crt"]
  }

  locals {
    aws_ssh_private_key_path = abspath(var.aws_ssh_private_key_path)
    boundary_install_dir     = abspath(var.boundary_install_dir)
    local_boundary_dir       = abspath(var.local_boundary_dir)
    # Source of the boundary artifact for each matrix variant; CRT builds
    # are read from the bundle path when one is provided.
    build_path = {
      "local" = "/tmp",
      "crt" = var.crt_bundle_path == null ? null : abspath(var.crt_bundle_path)
    }
    # Common resource tags, merged with any user-supplied tags.
    tags = merge({
      "Project Name" : var.project_name
      "Project" : "Enos",
      "Environment" : "ci"
    }, var.tags)
  }

  # Find availability zones that can host both the worker and controller
  # instance types.
  step "find_azs" {
    module = module.az_finder

    variables {
      instance_type = [
        var.worker_instance_type,
        var.controller_instance_type
      ]
    }
  }

  # Random string used as the boundary database password.
  step "create_db_password" {
    module = module.random_stringifier
  }

  # Build (or unpack) the boundary artifact selected by the matrix.
  step "build_boundary" {
    module = matrix.builder == "crt" ? module.build_crt : module.build_local

    variables {
      path = local.build_path[matrix.builder]
    }
  }

  # Base networking/KMS/AMI infrastructure shared by the rest of the steps.
  step "create_base_infra" {
    module = module.infra

    variables {
      availability_zones = step.find_azs.availability_zones
      common_tags = local.tags
    }
  }

  # Stand up the boundary controllers and workers.
  step "create_boundary_cluster" {
    module = module.boundary
    depends_on = [
      step.create_base_infra,
      step.build_boundary
    ]

    variables {
      boundary_install_dir = local.boundary_install_dir
      common_tags = local.tags
      controller_instance_type = var.controller_instance_type
      controller_count = var.controller_count
      db_pass = step.create_db_password.string
      kms_key_arn = step.create_base_infra.kms_key_arn
      local_artifact_path = step.build_boundary.artifact_path
      ubuntu_ami_id = step.create_base_infra.ami_ids["ubuntu"]["amd64"]
      vpc_id = step.create_base_infra.vpc_id
      worker_count = var.worker_count
      worker_instance_type = var.worker_instance_type
    }
  }

  # First unique tag and the derived host-set filter inputs.
  step "create_tag1" {
    module = module.random_stringifier
  }

  step "create_tag1_inputs" {
    module = module.generate_aws_host_tag_vars
    depends_on = [step.create_tag1]

    variables {
      tag_name = step.create_tag1.string
      tag_value = "true"
    }
  }

  # Two target instances carrying tag1; aws_host_set_count1 below expects
  # the first host set to discover both of them.
  step "create_targets_with_tag1" {
    module = module.target
    depends_on = [step.create_base_infra]

    variables {
      ami_id = step.create_base_infra.ami_ids["ubuntu"]["amd64"]
      aws_ssh_keypair_name = var.aws_ssh_keypair_name
      enos_user = var.enos_user
      instance_type = var.target_instance_type
      vpc_id = step.create_base_infra.vpc_id
      target_count = 2
      additional_tags = step.create_tag1_inputs.tag_map
    }
  }

  # Second unique tag and its filter inputs.
  step "create_tag2" {
    module = module.random_stringifier
  }

  step "create_tag2_inputs" {
    module = module.generate_aws_host_tag_vars
    depends_on = [step.create_tag2]

    variables {
      tag_name = step.create_tag2.string
      tag_value = "test"
    }
  }

  # One target instance carrying tag2 (matches aws_host_set_count2 below).
  step "create_targets_with_tag2" {
    module = module.target
    depends_on = [step.create_base_infra]

    variables {
      ami_id = step.create_base_infra.ami_ids["ubuntu"]["amd64"]
      aws_ssh_keypair_name = var.aws_ssh_keypair_name
      enos_user = var.enos_user
      instance_type = var.target_instance_type
      vpc_id = step.create_base_infra.vpc_id
      target_count = 1
      additional_tags = step.create_tag2_inputs.tag_map
    }
  }

  # Short unique suffix for the IAM resources created below.
  step "create_test_id" {
    module = module.random_stringifier
    variables {
      length = 5
    }
  }

  # Narrowly scoped IAM user + access keys the Go test uses to query EC2.
  step "iam_setup" {
    module = module.iam_setup
    depends_on = [
      step.create_base_infra,
      step.create_test_id
    ]

    variables {
      test_id = step.create_test_id.string
      test_email = var.test_email
    }
  }

  # Run the Go e2e test package against the provisioned environment.
  step "run_e2e_test" {
    module = module.test_e2e
    depends_on = [
      step.create_boundary_cluster,
      step.create_targets_with_tag1,
      step.create_targets_with_tag2,
      step.iam_setup
    ]

    variables {
      test_package = "github.com/hashicorp/boundary/testing/internal/e2e/host/aws"
      alb_boundary_api_addr = step.create_boundary_cluster.alb_boundary_api_addr
      auth_method_id = step.create_boundary_cluster.auth_method_id
      auth_login_name = step.create_boundary_cluster.auth_login_name
      auth_password = step.create_boundary_cluster.auth_password
      local_boundary_dir = local.local_boundary_dir
      aws_ssh_private_key_path = local.aws_ssh_private_key_path
      target_user = "ubuntu"
      aws_access_key_id = step.iam_setup.access_key_id
      aws_secret_access_key = step.iam_setup.secret_access_key
      aws_host_set_filter1 = step.create_tag1_inputs.tag_string
      aws_host_set_count1 = 2
      aws_host_set_ips1 = step.create_targets_with_tag1.target_ips
      aws_host_set_filter2 = step.create_tag2_inputs.tag_string
      aws_host_set_count2 = 1
    }
  }

  output "test_results" {
    value = step.run_e2e_test.test_results
  }
}
@ -0,0 +1,19 @@
|
||||
# Derives the EC2 instance tag and the matching host-set filter string for
# the dynamic host catalog e2e scenario.
variable "tag_name" {
  type = string
}
variable "tag_value" {
  type = string
}

locals {
  # Tag applied to target instances; prefixed with "e2e_" to keep test
  # tags distinct from any pre-existing instance tags.
  tag_map = { "e2e_${var.tag_name}" : var.tag_value }
  # Matching filter expression; fed to the scenario's aws_host_set_filter*
  # variables so host sets select exactly the instances tagged above.
  tag_string = "tag:e2e_${var.tag_name}=${var.tag_value}"
}

output "tag_map" {
  value = local.tag_map
}

output "tag_string" {
  value = local.tag_string
}
@ -0,0 +1,45 @@
|
||||
# Creates a throwaway IAM user, limited to describing EC2 instances, plus an
# access key pair for the dynamic host catalog e2e tests.
data "aws_caller_identity" "current" {}

variable "test_id" {}
variable "test_email" {}

locals {
  # Use the AWS provided email if users are running this, override with variable for CI
  user_email = var.test_email == null ? split(":", data.aws_caller_identity.current.user_id)[1] : var.test_email
}

resource "aws_iam_user" "boundary" {
  name = "boundary-e2e-${var.test_id}"
  tags = { boundary-demo = local.user_email }
  # Permissions boundary from the test account caps what this user can do
  # even if the inline policy below were widened.
  permissions_boundary = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/BoundaryDemoPermissionsBoundary"
}

resource "aws_iam_user_policy" "boundary" {
  name = "boundary_e2e_${var.test_id}"
  user = aws_iam_user.boundary.name
  # Minimal policy: the AWS host plugin only needs to enumerate instances.
  policy = jsonencode({
    "Version" : "2012-10-17",
    "Statement" : [
      {
        "Action" : [
          "ec2:DescribeInstances"
        ],
        "Effect" : "Allow",
        "Resource" : "*"
      }
    ]
  })
}

resource "aws_iam_access_key" "boundary" {
  user = aws_iam_user.boundary.name
}

output "access_key_id" {
  value = aws_iam_access_key.boundary.id
}

output "secret_access_key" {
  value     = aws_iam_access_key.boundary.secret
  sensitive = true
}
@ -0,0 +1,413 @@
|
||||
package aws_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/hashicorp/boundary/api/hostcatalogs"
|
||||
"github.com/hashicorp/boundary/api/hosts"
|
||||
"github.com/hashicorp/boundary/api/hostsets"
|
||||
"github.com/hashicorp/boundary/api/targets"
|
||||
"github.com/hashicorp/boundary/testing/internal/e2e"
|
||||
"github.com/hashicorp/boundary/testing/internal/e2e/boundary"
|
||||
"github.com/kelseyhightower/envconfig"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// config holds the inputs for the AWS dynamic host catalog e2e tests. Each
// field is populated from its envconfig-tagged environment variable by
// loadConfig and checked for presence by validate.
type config struct {
	AwsAccessKeyId     string `envconfig:"E2E_AWS_ACCESS_KEY_ID"`      // AWS credentials handed to the host plugin
	AwsSecretAccessKey string `envconfig:"E2E_AWS_SECRET_ACCESS_KEY"`
	AwsHostSetFilter1  string `envconfig:"E2E_AWS_HOST_SET_FILTER1"`   // tag filter for the first host set
	AwsHostSetCount1   string `envconfig:"E2E_AWS_HOST_SET_COUNT1"`    // expected host count for filter1 (numeric string)
	AwsHostSetIps1     string `envconfig:"E2E_AWS_HOST_SET_IPS1"`      // JSON array of IPs expected behind filter1
	AwsHostSetFilter2  string `envconfig:"E2E_AWS_HOST_SET_FILTER2"`   // tag filter for the second host set
	AwsHostSetCount2   string `envconfig:"E2E_AWS_HOST_SET_COUNT2"`    // expected host count for filter2 (numeric string)
	TargetSshKeyPath   string `envconfig:"E2E_SSH_KEY_PATH"`           // e.g. /Users/username/key.pem
	TargetSshUser      string `envconfig:"E2E_SSH_USER"`               // e.g. ubuntu
	TargetPort         string `envconfig:"E2E_SSH_PORT" default:"22"`
}
||||
|
||||
func (c *config) validate() error {
|
||||
if c.AwsAccessKeyId == "" {
|
||||
return errors.New("AwsAccessKeyId is empty. Set environment variable: E2E_AWS_ACCESS_KEY_ID")
|
||||
}
|
||||
if c.AwsSecretAccessKey == "" {
|
||||
return errors.New("AwsSecretAccessKey is empty. Set environment variable: E2E_AWS_SECRET_ACCESS_KEY")
|
||||
}
|
||||
if c.AwsHostSetFilter1 == "" {
|
||||
return errors.New("AwsHostSetFilter1 is empty. Set environment variable: E2E_AWS_HOST_SET_FILTER1")
|
||||
}
|
||||
if c.AwsHostSetCount1 == "" {
|
||||
return errors.New("AwsHostSetCount1 is empty. Set environment variable: E2E_AWS_HOST_SET_COUNT1")
|
||||
}
|
||||
if c.AwsHostSetFilter2 == "" {
|
||||
return errors.New("AwsHostSetFilter2 is empty. Set environment variable: E2E_AWS_HOST_SET_FILTER2")
|
||||
}
|
||||
if c.AwsHostSetCount2 == "" {
|
||||
return errors.New("AwsHostSetCount2 is empty. Set environment variable: E2E_AWS_HOST_SET_COUNT2")
|
||||
}
|
||||
if c.TargetSshKeyPath == "" {
|
||||
return errors.New("TargetSshKeyPath is empty. Set environment variable: E2E_SSH_KEY_PATH")
|
||||
}
|
||||
if c.TargetSshUser == "" {
|
||||
return errors.New("TargetSshUser is empty. Set environment variable: E2E_SSH_USER")
|
||||
}
|
||||
if c.TargetPort == "" {
|
||||
return errors.New("TargetPort is empty. Set environment variable: E2E_SSH_PORT")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfig() (*config, error) {
|
||||
var c config
|
||||
err := envconfig.Process("", &c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = c.validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &c, err
|
||||
}
|
||||
|
||||
func TestCreateAwsDynamicHostCatalogCli(t *testing.T) {
|
||||
e2e.MaybeSkipTest(t)
|
||||
c, err := loadConfig()
|
||||
require.NoError(t, err)
|
||||
|
||||
boundary.AuthenticateCli(t)
|
||||
|
||||
// Create an org and project
|
||||
newOrgId := boundary.CreateNewOrgCli(t)
|
||||
t.Logf("Created Org Id: %s", newOrgId)
|
||||
newProjectId := boundary.CreateNewProjectCli(t, newOrgId)
|
||||
t.Logf("Created Project Id: %s", newProjectId)
|
||||
|
||||
// Create a dynamic host catalog
|
||||
output := e2e.RunCommand("boundary", "host-catalogs", "create", "plugin",
|
||||
"-scope-id", newProjectId,
|
||||
"-plugin-name", "aws",
|
||||
"-attr", "disable_credential_rotation=true",
|
||||
"-attr", "region=us-east-1",
|
||||
"-secret", "access_key_id=env://E2E_AWS_ACCESS_KEY_ID",
|
||||
"-secret", "secret_access_key=env://E2E_AWS_SECRET_ACCESS_KEY",
|
||||
"-format", "json",
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
var newHostCatalogResult hostcatalogs.HostCatalogCreateResult
|
||||
err = json.Unmarshal(output.Stdout, &newHostCatalogResult)
|
||||
require.NoError(t, err)
|
||||
newHostCatalogId := newHostCatalogResult.Item.Id
|
||||
t.Logf("Created Host Catalog: %s", newHostCatalogId)
|
||||
|
||||
// Create a host set
|
||||
output = e2e.RunCommand("boundary", "host-sets", "create", "plugin",
|
||||
"-host-catalog-id", newHostCatalogId,
|
||||
"-attr", "filters="+c.AwsHostSetFilter1,
|
||||
"-name", "e2e Automated Test Host Set",
|
||||
"-format", "json",
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
var newHostSetResult hostsets.HostSetCreateResult
|
||||
err = json.Unmarshal(output.Stdout, &newHostSetResult)
|
||||
require.NoError(t, err)
|
||||
newHostSetId1 := newHostSetResult.Item.Id
|
||||
t.Logf("Created Host Set: %s", newHostSetId1)
|
||||
|
||||
// Get list of hosts in host set
|
||||
// Retry is needed here since it can take a few tries before hosts start appearing
|
||||
t.Logf("Looking for items in the host set...")
|
||||
var actualHostSetCount1 int
|
||||
err = backoff.RetryNotify(
|
||||
func() error {
|
||||
output = e2e.RunCommand("boundary", "host-sets", "read",
|
||||
"-id", newHostSetId1,
|
||||
"-format", "json",
|
||||
)
|
||||
if output.Err != nil {
|
||||
return backoff.Permanent(errors.New(string(output.Stderr)))
|
||||
}
|
||||
|
||||
var hostSetsReadResult hostsets.HostSetReadResult
|
||||
err = json.Unmarshal(output.Stdout, &hostSetsReadResult)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
actualHostSetCount1 = len(hostSetsReadResult.Item.HostIds)
|
||||
if actualHostSetCount1 == 0 {
|
||||
return errors.New("No items are appearing in the host set")
|
||||
}
|
||||
|
||||
t.Logf("Found %d hosts", actualHostSetCount1)
|
||||
return nil
|
||||
},
|
||||
backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5),
|
||||
func(err error, td time.Duration) {
|
||||
t.Logf("%s. Retrying...", err.Error())
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
expectedHostSetCount1, err := strconv.Atoi(c.AwsHostSetCount1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedHostSetCount1, actualHostSetCount1, "Numbers of hosts in host set did not match expected amount")
|
||||
|
||||
// Create another host set
|
||||
output = e2e.RunCommand("boundary", "host-sets", "create", "plugin",
|
||||
"-host-catalog-id", newHostCatalogId,
|
||||
"-attr", "filters="+c.AwsHostSetFilter2,
|
||||
"-name", "e2e Automated Test Host Set2",
|
||||
"-format", "json",
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
var newHostSetResult2 hostsets.HostSetCreateResult
|
||||
err = json.Unmarshal(output.Stdout, &newHostSetResult2)
|
||||
require.NoError(t, err)
|
||||
newHostSetId2 := newHostSetResult2.Item.Id
|
||||
t.Logf("Created Host Set: %s", newHostSetId2)
|
||||
|
||||
// Get list of hosts in the second host set
|
||||
t.Logf("Looking for items in the second host set...")
|
||||
var actualHostSetCount2 int
|
||||
err = backoff.RetryNotify(
|
||||
func() error {
|
||||
output = e2e.RunCommand("boundary", "host-sets", "read",
|
||||
"-id", newHostSetId2,
|
||||
"-format", "json",
|
||||
)
|
||||
if output.Err != nil {
|
||||
return backoff.Permanent(errors.New(string(output.Stderr)))
|
||||
}
|
||||
|
||||
var hostSetsReadResult hostsets.HostSetReadResult
|
||||
err = json.Unmarshal(output.Stdout, &hostSetsReadResult)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
actualHostSetCount2 = len(hostSetsReadResult.Item.HostIds)
|
||||
if actualHostSetCount2 == 0 {
|
||||
return errors.New("No items are appearing in the host set")
|
||||
}
|
||||
|
||||
t.Logf("Found %d hosts", actualHostSetCount2)
|
||||
return nil
|
||||
},
|
||||
backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5),
|
||||
func(err error, td time.Duration) {
|
||||
t.Logf("%s. Retrying...", err.Error())
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
expectedHostSetCount2, err := strconv.Atoi(c.AwsHostSetCount2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedHostSetCount2, actualHostSetCount2, "Numbers of hosts in host set did not match expected amount")
|
||||
|
||||
// Get list of all hosts from host catalog
|
||||
// Retry is needed here since it can take a few tries before hosts start appearing
|
||||
t.Logf("Looking for items in the host catalog...")
|
||||
var actualHostCatalogCount int
|
||||
err = backoff.RetryNotify(
|
||||
func() error {
|
||||
output = e2e.RunCommand("boundary", "hosts", "list",
|
||||
"-host-catalog-id", newHostCatalogId,
|
||||
"-format", "json",
|
||||
)
|
||||
if output.Err != nil {
|
||||
return backoff.Permanent(errors.New(string(output.Stderr)))
|
||||
}
|
||||
|
||||
var hostCatalogListResult hostcatalogs.HostCatalogListResult
|
||||
err = json.Unmarshal(output.Stdout, &hostCatalogListResult)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
actualHostCatalogCount = len(hostCatalogListResult.Items)
|
||||
if actualHostCatalogCount == 0 {
|
||||
return errors.New("No items are appearing in the host catalog")
|
||||
}
|
||||
|
||||
t.Logf("Found %d hosts", actualHostCatalogCount)
|
||||
return nil
|
||||
},
|
||||
backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5),
|
||||
func(err error, td time.Duration) {
|
||||
t.Logf("%s. Retrying...", err.Error())
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
expectedHostCatalogCount := expectedHostSetCount1 + expectedHostSetCount2
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedHostCatalogCount, actualHostCatalogCount, "Numbers of hosts in host catalog did not match expected amount")
|
||||
|
||||
// Create target
|
||||
output = e2e.RunCommand("boundary", "targets", "create", "tcp",
|
||||
"-scope-id", newProjectId,
|
||||
"-default-port", c.TargetPort,
|
||||
"-name", "e2e Automated Test Target",
|
||||
"-format", "json",
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
var newTargetResult targets.TargetCreateResult
|
||||
err = json.Unmarshal(output.Stdout, &newTargetResult)
|
||||
require.NoError(t, err)
|
||||
newTargetId := newTargetResult.Item.Id
|
||||
t.Logf("Created Target: %s", newTargetId)
|
||||
|
||||
// Add host set to target
|
||||
output = e2e.RunCommand("boundary", "targets", "add-host-sources",
|
||||
"-id", newTargetId,
|
||||
"-host-source", newHostSetId1,
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
|
||||
// Connect to target
|
||||
output = e2e.RunCommand("boundary", "connect",
|
||||
"-target-id", newTargetId,
|
||||
"-exec", "/usr/bin/ssh", "--",
|
||||
"-l", c.TargetSshUser,
|
||||
"-i", c.TargetSshKeyPath,
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "IdentitiesOnly=yes", // forces the use of the provided key
|
||||
"-p", "{{boundary.port}}", // this is provided by boundary
|
||||
"{{boundary.ip}}",
|
||||
"hostname", "-i",
|
||||
)
|
||||
require.NoError(t, output.Err, string(output.Stderr))
|
||||
|
||||
parts := strings.Fields(string(output.Stdout))
|
||||
hostIp := parts[len(parts)-1]
|
||||
t.Log("Successfully connected to the target")
|
||||
|
||||
// Check if connected host exists in the host set
|
||||
var targetIps []string
|
||||
err = json.Unmarshal([]byte(c.AwsHostSetIps1), &targetIps)
|
||||
hostIpInList := false
|
||||
for _, v := range targetIps {
|
||||
if v == hostIp {
|
||||
hostIpInList = true
|
||||
}
|
||||
}
|
||||
require.True(t, hostIpInList, fmt.Sprintf("Connected host (%s) is not in expected list (%s)", hostIp, targetIps))
|
||||
}
|
||||
|
||||
func TestCreateAwsDynamicHostCatalogApi(t *testing.T) {
|
||||
e2e.MaybeSkipTest(t)
|
||||
c, err := loadConfig()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create boundary api client
|
||||
client, err := boundary.NewApiClient()
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create an org and project
|
||||
newOrgId := boundary.CreateNewOrgApi(t, ctx, client)
|
||||
t.Logf("Created Org Id: %s", newOrgId)
|
||||
newProjectId := boundary.CreateNewProjectApi(t, ctx, client, newOrgId)
|
||||
t.Logf("Created Project Id: %s", newProjectId)
|
||||
|
||||
// Create a dynamic host catalog
|
||||
hcClient := hostcatalogs.NewClient(client)
|
||||
newHostCatalogResult, err := hcClient.Create(ctx, "plugin", newProjectId,
|
||||
hostcatalogs.WithName("e2e Automated Test Host Catalog"),
|
||||
hostcatalogs.WithPluginName("aws"),
|
||||
hostcatalogs.WithAttributes(map[string]interface{}{
|
||||
"disable_credential_rotation": true,
|
||||
"region": "us-east-1",
|
||||
}),
|
||||
hostcatalogs.WithSecrets(map[string]interface{}{
|
||||
"access_key_id": c.AwsAccessKeyId,
|
||||
"secret_access_key": c.AwsSecretAccessKey,
|
||||
}),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
newHostCatalogId := newHostCatalogResult.Item.Id
|
||||
t.Logf("Created Host Catalog: %s", newHostCatalogId)
|
||||
|
||||
// Create a host set and add to catalog
|
||||
hsClient := hostsets.NewClient(client)
|
||||
newHostSetResult, err := hsClient.Create(ctx, newHostCatalogId,
|
||||
hostsets.WithAttributes(map[string]interface{}{
|
||||
"filters": c.AwsHostSetFilter1,
|
||||
}),
|
||||
hostsets.WithName("e2e Automated Test Host Set"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
newHostSetId := newHostSetResult.Item.Id
|
||||
t.Logf("Created Host Set: %s", newHostSetId)
|
||||
|
||||
// Get list of hosts in host set
|
||||
// Retry is needed here since it can take a few tries before hosts start appearing
|
||||
t.Logf("Looking for items in the host set...")
|
||||
var actualHostSetCount int
|
||||
err = backoff.RetryNotify(
|
||||
func() error {
|
||||
hostSetReadResult, err := hsClient.Read(ctx, newHostSetId)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
actualHostSetCount = len(hostSetReadResult.Item.HostIds)
|
||||
if actualHostSetCount == 0 {
|
||||
return errors.New("No items are appearing in the host set")
|
||||
}
|
||||
|
||||
t.Logf("Found %d hosts", actualHostSetCount)
|
||||
return nil
|
||||
},
|
||||
backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5),
|
||||
func(err error, td time.Duration) {
|
||||
t.Logf("%s. Retrying...", err.Error())
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Log("Successfully found items in the host set")
|
||||
expectedHostSetCount, err := strconv.Atoi(c.AwsHostSetCount1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedHostSetCount, actualHostSetCount, "Numbers of hosts in host set did not match expected amount")
|
||||
|
||||
// Get list of all hosts from host catalog
|
||||
// Retry is needed here since it can take a few tries before hosts start appearing
|
||||
t.Logf("Looking for items in the host catalog...")
|
||||
var actualHostCatalogCount int
|
||||
hClient := hosts.NewClient(client)
|
||||
err = backoff.RetryNotify(
|
||||
func() error {
|
||||
hostListResult, err := hClient.List(ctx, newHostCatalogId)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
actualHostCatalogCount = len(hostListResult.Items)
|
||||
if actualHostCatalogCount == 0 {
|
||||
return errors.New("No items are appearing in the host catalog")
|
||||
}
|
||||
|
||||
t.Logf("Found %d hosts", actualHostCatalogCount)
|
||||
return nil
|
||||
},
|
||||
backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 5),
|
||||
func(err error, td time.Duration) {
|
||||
t.Logf("%s. Retrying...", err.Error())
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Log("Successfully found items in the host catalog")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, actualHostCatalogCount, expectedHostSetCount, "Numbers of hosts in host catalog did not match expected amount")
|
||||
}
|
||||
Loading…
Reference in new issue