From 831630fabe01d2fca5941f2ad6f16b56272cede0 Mon Sep 17 00:00:00 2001 From: Liam Cervante Date: Mon, 26 Feb 2024 10:42:14 +0100 Subject: [PATCH] stacks: the plan function should stop on validation errors (#34720) --- .../internal/stackeval/main_plan.go | 5 +- internal/stacks/stackruntime/plan_test.go | 111 ++++++++++++++++++ internal/stacks/stackruntime/validate_test.go | 93 ++++++++------- 3 files changed, 164 insertions(+), 45 deletions(-) diff --git a/internal/stacks/stackruntime/internal/stackeval/main_plan.go b/internal/stacks/stackruntime/internal/stackeval/main_plan.go index 5dd79b2fb4..65761308c7 100644 --- a/internal/stacks/stackruntime/internal/stackeval/main_plan.go +++ b/internal/stacks/stackruntime/internal/stackeval/main_plan.go @@ -7,11 +7,12 @@ import ( "context" "sync/atomic" + "google.golang.org/protobuf/types/known/anypb" + "github.com/hashicorp/terraform/internal/promising" "github.com/hashicorp/terraform/internal/stacks/stackplan" "github.com/hashicorp/terraform/internal/tfdiags" "github.com/hashicorp/terraform/version" - "google.golang.org/protobuf/types/known/anypb" ) // PlanAll visits all of the objects in the configuration and the prior state, @@ -183,7 +184,7 @@ func (m *Main) walkPlanObjectChanges(ctx context.Context, walk *planWalk, obj Pl walk.out.AnnouncePlannedChange(ctx, change) } if len(diags) != 0 { - walk.out.AnnounceDiagnostics(ctx, diags) + walk.state.AddDiags(diags) } }) } diff --git a/internal/stacks/stackruntime/plan_test.go b/internal/stacks/stackruntime/plan_test.go index 0d991a0c25..aeab381de6 100644 --- a/internal/stacks/stackruntime/plan_test.go +++ b/internal/stacks/stackruntime/plan_test.go @@ -35,6 +35,117 @@ import ( "github.com/hashicorp/terraform/version" ) +// TestPlan_valid runs the same set of configurations as TestValidate_valid. +// +// Plan should execute the same set of validations as validate, so we expect +// all of the following to be valid for both plan and validate.
+// +// We also want to make sure the static and dynamic evaluations are not +// returning duplicate / conflicting diagnostics. This test will tell us if +// either plan or validate is reporting diagnostics the others are missing. +func TestPlan_valid(t *testing.T) { + for name, tc := range validConfigurations { + t.Run(name, func(t *testing.T) { + if tc.skip { + // We've added this test before the implementation was ready. + t.SkipNow() + } + + ctx := context.Background() + cfg := loadMainBundleConfigForTest(t, name) + + fakePlanTimestamp, err := time.Parse(time.RFC3339, "1991-08-25T20:57:08Z") + if err != nil { + t.Fatal(err) + } + + changesCh := make(chan stackplan.PlannedChange, 8) + diagsCh := make(chan tfdiags.Diagnostic, 2) + req := PlanRequest{ + Config: cfg, + ForcePlanTimestamp: &fakePlanTimestamp, + } + resp := PlanResponse{ + PlannedChanges: changesCh, + Diagnostics: diagsCh, + } + + go Plan(ctx, &req, &resp) + _, diags := collectPlanOutput(changesCh, diagsCh) + + // We don't care about the planned changes here, just the + // diagnostics. + + // The following will fail the test if there are any error + // diagnostics. + reportDiagnosticsForTest(t, diags) + + // We also want to fail if there are just warnings, since the + // configurations here are supposed to be totally problem-free. + if len(diags) != 0 { + // reportDiagnosticsForTest already showed the diagnostics in + // the log + t.FailNow() + } + }) + } +} + +// TestPlan_invalid runs the same set of configurations as TestValidate_invalid. + +// +// Plan should execute the same set of validations as validate, so we expect +// all of the following to be invalid for both plan and validate. +// +// We also want to make sure the static and dynamic evaluations are not +// returning duplicate / conflicting diagnostics. This test will tell us if +// either plan or validate is reporting diagnostics the others are missing.
+// +// The dynamic validation that happens during the plan *might* introduce +// additional diagnostics that are not present in the static validation. These +// should be added manually into this function. +func TestPlan_invalid(t *testing.T) { + for name, tc := range invalidConfigurations { + t.Run(name, func(t *testing.T) { + if tc.skip { + // We've added this test before the implementation was ready. + t.SkipNow() + } + + ctx := context.Background() + cfg := loadMainBundleConfigForTest(t, name) + + fakePlanTimestamp, err := time.Parse(time.RFC3339, "1991-08-25T20:57:08Z") + if err != nil { + t.Fatal(err) + } + + changesCh := make(chan stackplan.PlannedChange, 8) + diagsCh := make(chan tfdiags.Diagnostic, 2) + req := PlanRequest{ + Config: cfg, + ProviderFactories: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("testing"): func() (providers.Interface, error) { + return stacks_testing_provider.NewProvider(), nil + }, + }, + ForcePlanTimestamp: &fakePlanTimestamp, + } + resp := PlanResponse{ + PlannedChanges: changesCh, + Diagnostics: diagsCh, + } + + go Plan(ctx, &req, &resp) + _, gotDiags := collectPlanOutput(changesCh, diagsCh) + wantDiags := tc.diags() + + if diff := cmp.Diff(wantDiags.ForRPC(), gotDiags.ForRPC()); diff != "" { + t.Errorf("wrong diagnostics\n%s", diff) + } + }) + } +} + func TestPlanWithMissingInputVariable(t *testing.T) { ctx := context.Background() cfg := loadMainBundleConfigForTest(t, "plan-undeclared-variable-in-component") diff --git a/internal/stacks/stackruntime/validate_test.go b/internal/stacks/stackruntime/validate_test.go index 0897f36ee8..e730cd15f8 100644 --- a/internal/stacks/stackruntime/validate_test.go +++ b/internal/stacks/stackruntime/validate_test.go @@ -18,17 +18,14 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) -// TestValidate_valid tests that a variety of configurations under the main -// test source bundle each generate no diagnostics at all, as a -// relatively-simple way to detect
accidental regressions. -// -// Any stack configuration directory that we expect should be valid can -// potentially be included in here unless it depends on provider plugins -// to complete validation, since this test cannot supply provider plugins. -func TestValidate_valid(t *testing.T) { - validConfigDirs := map[string]struct { - skip bool - }{ +type validateTestInput struct { + skip bool + diags func() tfdiags.Diagnostics +} + +var ( + // validConfigurations are shared between the validate and plan tests. + validConfigurations = map[string]validateTestInput{ "empty": {}, "variable-output-roundtrip": {}, "variable-output-roundtrip-nested": {}, @@ -37,37 +34,8 @@ func TestValidate_valid(t *testing.T) { }, } - for name, tc := range validConfigDirs { - t.Run(name, func(t *testing.T) { - if tc.skip { - // We've added this test before the implementation was ready. - t.SkipNow() - } - - ctx := context.Background() - cfg := loadMainBundleConfigForTest(t, name) - - diags := Validate(ctx, &ValidateRequest{ - Config: cfg, - }) - - // The following will fail the test if there are any error diagnostics. - reportDiagnosticsForTest(t, diags) - - // We also want to fail if there are just warnings, since the - // configurations here are supposed to be totally problem-free. - if len(diags) != 0 { - t.FailNow() // reportDiagnosticsForTest already showed the diagnostics in the log - } - }) - } -} - -func TestValidate_invalid(t *testing.T) { - tcs := map[string]struct { - diags func() tfdiags.Diagnostics - skip bool - }{ + // invalidConfigurations are shared between the validate and plan tests.
+ invalidConfigurations = map[string]validateTestInput{ "validate-undeclared-variable": { diags: func() tfdiags.Diagnostics { var diags tfdiags.Diagnostics @@ -139,8 +107,47 @@ func TestValidate_invalid(t *testing.T) { skip: true, }, } +) - for name, tc := range tcs { +// TestValidate_valid tests that a variety of configurations under the main +// test source bundle each generate no diagnostics at all, as a +// relatively-simple way to detect accidental regressions. +// +// Any stack configuration directory that we expect should be valid can +// potentially be included in here unless it depends on provider plugins +// to complete validation, since this test cannot supply provider plugins. +func TestValidate_valid(t *testing.T) { + for name, tc := range validConfigurations { + t.Run(name, func(t *testing.T) { + if tc.skip { + // We've added this test before the implementation was ready. + t.SkipNow() + } + + ctx := context.Background() + cfg := loadMainBundleConfigForTest(t, name) + + diags := Validate(ctx, &ValidateRequest{ + Config: cfg, + }) + + // The following will fail the test if there are any error + // diagnostics. + reportDiagnosticsForTest(t, diags) + + // We also want to fail if there are just warnings, since the + // configurations here are supposed to be totally problem-free. + if len(diags) != 0 { + // reportDiagnosticsForTest already showed the diagnostics in + // the log + t.FailNow() + } + }) + } +} + +func TestValidate_invalid(t *testing.T) { + for name, tc := range invalidConfigurations { t.Run(name, func(t *testing.T) { if tc.skip { // We've added this test before the implementation was ready.