stacks: the plan function should stop on validation errors (#34720)

pull/34721/head
Liam Cervante 2 years ago committed by GitHub
parent 268c8e264d
commit 831630fabe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -7,11 +7,12 @@ import (
"context"
"sync/atomic"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/terraform/internal/promising"
"github.com/hashicorp/terraform/internal/stacks/stackplan"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/hashicorp/terraform/version"
"google.golang.org/protobuf/types/known/anypb"
)
// PlanAll visits all of the objects in the configuration and the prior state,
@ -183,7 +184,7 @@ func (m *Main) walkPlanObjectChanges(ctx context.Context, walk *planWalk, obj Pl
walk.out.AnnouncePlannedChange(ctx, change)
}
if len(diags) != 0 {
walk.out.AnnounceDiagnostics(ctx, diags)
walk.state.AddDiags(diags)
}
})
}

@ -35,6 +35,117 @@ import (
"github.com/hashicorp/terraform/version"
)
// TestPlan_valid exercises the same configurations as TestValidate_valid.
//
// Planning is expected to run the same validations as validate, so every
// configuration listed as valid for validate must also plan cleanly.
//
// It also guards against the static and dynamic evaluation paths emitting
// duplicate or conflicting diagnostics: a discrepancy here means plan and
// validate disagree about what is wrong (or right) with a configuration.
func TestPlan_valid(t *testing.T) {
	for name, tc := range validConfigurations {
		t.Run(name, func(t *testing.T) {
			if tc.skip {
				// Test was registered ahead of the implementation being ready.
				t.SkipNow()
			}

			ctx := context.Background()
			cfg := loadMainBundleConfigForTest(t, name)

			planTimestamp, err := time.Parse(time.RFC3339, "1991-08-25T20:57:08Z")
			if err != nil {
				t.Fatal(err)
			}

			plannedCh := make(chan stackplan.PlannedChange, 8)
			diagnosticsCh := make(chan tfdiags.Diagnostic, 2)

			request := PlanRequest{
				Config:             cfg,
				ForcePlanTimestamp: &planTimestamp,
			}
			response := PlanResponse{
				PlannedChanges: plannedCh,
				Diagnostics:    diagnosticsCh,
			}
			go Plan(ctx, &request, &response)

			// Only the diagnostics matter here; the planned changes are
			// discarded.
			_, diags := collectPlanOutput(plannedCh, diagnosticsCh)

			// Fails the test on any error diagnostics and logs them all.
			reportDiagnosticsForTest(t, diags)

			// Warnings alone are also a failure: these configurations are
			// supposed to be completely problem-free.
			if len(diags) != 0 {
				// The diagnostics were already logged above.
				t.FailNow()
			}
		})
	}
}
// TestPlan_invalid exercises the same configurations as TestValidate_invalid.
//
// Planning is expected to run the same validations as validate, so every
// configuration listed as invalid for validate must also fail to plan, with
// matching diagnostics.
//
// It also guards against the static and dynamic evaluation paths emitting
// duplicate or conflicting diagnostics.
//
// The dynamic validation performed during planning *might* surface extra
// diagnostics beyond the static ones; any such additions should be recorded
// manually in this function.
func TestPlan_invalid(t *testing.T) {
	for name, tc := range invalidConfigurations {
		t.Run(name, func(t *testing.T) {
			if tc.skip {
				// Test was registered ahead of the implementation being ready.
				t.SkipNow()
			}

			ctx := context.Background()
			cfg := loadMainBundleConfigForTest(t, name)

			planTimestamp, err := time.Parse(time.RFC3339, "1991-08-25T20:57:08Z")
			if err != nil {
				t.Fatal(err)
			}

			plannedCh := make(chan stackplan.PlannedChange, 8)
			diagnosticsCh := make(chan tfdiags.Diagnostic, 2)

			request := PlanRequest{
				Config: cfg,
				ProviderFactories: map[addrs.Provider]providers.Factory{
					addrs.NewDefaultProvider("testing"): func() (providers.Interface, error) {
						return stacks_testing_provider.NewProvider(), nil
					},
				},
				ForcePlanTimestamp: &planTimestamp,
			}
			response := PlanResponse{
				PlannedChanges: plannedCh,
				Diagnostics:    diagnosticsCh,
			}
			go Plan(ctx, &request, &response)

			_, gotDiags := collectPlanOutput(plannedCh, diagnosticsCh)
			wantDiags := tc.diags()

			// Normalize via ForRPC so the comparison ignores representation
			// details and only the diagnostic content is compared.
			if diff := cmp.Diff(wantDiags.ForRPC(), gotDiags.ForRPC()); diff != "" {
				t.Errorf("wrong diagnostics\n%s", diff)
			}
		})
	}
}
func TestPlanWithMissingInputVariable(t *testing.T) {
ctx := context.Background()
cfg := loadMainBundleConfigForTest(t, "plan-undeclared-variable-in-component")

@ -18,17 +18,14 @@ import (
"github.com/hashicorp/terraform/internal/tfdiags"
)
// TestValidate_valid tests that a variety of configurations under the main
// test source bundle each generate no diagnostics at all, as a
// relatively-simple way to detect accidental regressions.
//
// Any stack configuration directory that we expect should be valid can
// potentially be included in here unless it depends on provider plugins
// to complete validation, since this test cannot supply provider plugins.
func TestValidate_valid(t *testing.T) {
validConfigDirs := map[string]struct {
skip bool
}{
type validateTestInput struct {
skip bool
diags func() tfdiags.Diagnostics
}
var (
// validConfigurations are shared between the validate and plan tests.
validConfigurations = map[string]validateTestInput{
"empty": {},
"variable-output-roundtrip": {},
"variable-output-roundtrip-nested": {},
@ -37,37 +34,8 @@ func TestValidate_valid(t *testing.T) {
},
}
for name, tc := range validConfigDirs {
t.Run(name, func(t *testing.T) {
if tc.skip {
// We've added this test before the implementation was ready.
t.SkipNow()
}
ctx := context.Background()
cfg := loadMainBundleConfigForTest(t, name)
diags := Validate(ctx, &ValidateRequest{
Config: cfg,
})
// The following will fail the test if there are any error diagnostics.
reportDiagnosticsForTest(t, diags)
// We also want to fail if there are just warnings, since the
// configurations here are supposed to be totally problem-free.
if len(diags) != 0 {
t.FailNow() // reportDiagnosticsForTest already showed the diagnostics in the log
}
})
}
}
func TestValidate_invalid(t *testing.T) {
tcs := map[string]struct {
diags func() tfdiags.Diagnostics
skip bool
}{
// invalidConfigurations are shared between the validate and plan tests.
invalidConfigurations = map[string]validateTestInput{
"validate-undeclared-variable": {
diags: func() tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
@ -139,8 +107,47 @@ func TestValidate_invalid(t *testing.T) {
skip: true,
},
}
)
for name, tc := range tcs {
// TestValidate_valid checks that a variety of configurations under the main
// test source bundle each produce no diagnostics whatsoever, as a
// relatively-simple way to catch accidental regressions.
//
// Any stack configuration directory expected to be valid is a candidate for
// inclusion here, unless it needs provider plugins to finish validation —
// this test cannot supply provider plugins.
func TestValidate_valid(t *testing.T) {
	for name, tc := range validConfigurations {
		t.Run(name, func(t *testing.T) {
			if tc.skip {
				// Test was registered ahead of the implementation being ready.
				t.SkipNow()
			}

			ctx := context.Background()
			cfg := loadMainBundleConfigForTest(t, name)

			request := ValidateRequest{
				Config: cfg,
			}
			diags := Validate(ctx, &request)

			// Fails the test on any error diagnostics and logs them all.
			reportDiagnosticsForTest(t, diags)

			// Warnings alone are also a failure: these configurations are
			// supposed to be completely problem-free.
			if len(diags) != 0 {
				// The diagnostics were already logged above.
				t.FailNow()
			}
		})
	}
}
func TestValidate_invalid(t *testing.T) {
for name, tc := range invalidConfigurations {
t.Run(name, func(t *testing.T) {
if tc.skip {
// We've added this test before the implementation was ready.

Loading…
Cancel
Save