diff --git a/.changes/v1.14/ENHANCEMENTS-20250723-122922.yaml b/.changes/v1.14/ENHANCEMENTS-20250723-122922.yaml new file mode 100644 index 0000000000..2738fafb61 --- /dev/null +++ b/.changes/v1.14/ENHANCEMENTS-20250723-122922.yaml @@ -0,0 +1,5 @@ +kind: ENHANCEMENTS +body: 'terraform test: expected diagnostics will be included in test output when running in verbose mode' +time: 2025-07-23T12:29:22.244611+02:00 +custom: + Issue: "37362" diff --git a/internal/command/test_test.go b/internal/command/test_test.go index a37d9bc031..e2055c892c 100644 --- a/internal/command/test_test.go +++ b/internal/command/test_test.go @@ -130,6 +130,37 @@ func TestTest_Runs(t *testing.T) { expectedOut: []string{"1 passed, 0 failed."}, code: 0, }, + "expect_failures_outputs": { + expectedOut: []string{"1 passed, 0 failed."}, + code: 0, + }, + "expect_failures_checks_verbose": { + override: "expect_failures_checks", + args: []string{"-verbose"}, + expectedOut: []string{"1 passed, 0 failed.", "Warning: Check block assertion failed"}, + code: 0, + }, + "expect_failures_inputs_verbose": { + override: "expect_failures_inputs", + args: []string{"-verbose"}, + expectedOut: []string{"1 passed, 0 failed."}, + expectedErr: []string{"Error: Invalid value for variable"}, + code: 0, + }, + "expect_failures_resources_verbose": { + override: "expect_failures_resources", + args: []string{"-verbose"}, + expectedOut: []string{"1 passed, 0 failed."}, + expectedErr: []string{"Error: Resource postcondition failed"}, + code: 0, + }, + "expect_failures_outputs_verbose": { + override: "expect_failures_outputs", + args: []string{"-verbose"}, + expectedOut: []string{"1 passed, 0 failed."}, + expectedErr: []string{"Error: Module output value precondition failed"}, + code: 0, + }, "multiple_files": { expectedOut: []string{"2 passed, 0 failed"}, code: 0, diff --git a/internal/moduletest/graph/apply.go b/internal/moduletest/graph/apply.go index cda8680ef0..9efcf559f6 100644 --- 
a/internal/moduletest/graph/apply.go +++ b/internal/moduletest/graph/apply.go @@ -63,7 +63,7 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue // Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't. // We'll also update the run status based on the presence of errors or missing expected failures. - failOrErr := n.checkForMissingExpectedFailures(run, applyDiags) + failOrErr := n.checkForMissingExpectedFailures(ctx, run, applyDiags) if failOrErr { // Even though the apply operation failed, the graph may have done // partial updates and the returned state should reflect this. @@ -172,11 +172,19 @@ func (n *NodeTestRun) apply(tfCtx *terraform.Context, plan *plans.Plan, progress // checkForMissingExpectedFailures checks for missing expected failures in the diagnostics. // It updates the run status based on the presence of errors or missing expected failures. -func (n *NodeTestRun) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) { +func (n *NodeTestRun) checkForMissingExpectedFailures(ctx *EvalContext, run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) { // Retrieve and append diagnostics that are either unrelated to expected failures // or report missing expected failures. unexpectedDiags := run.ValidateExpectedFailures(diags) + + if ctx.Verbose() { + // in verbose mode, we still add all the original diagnostics for + // display even if they are expected. + run.Diagnostics = run.Diagnostics.Append(diags) + } else { + run.Diagnostics = run.Diagnostics.Append(unexpectedDiags) + } + for _, diag := range unexpectedDiags { // If any diagnostic indicates a missing expected failure, set the run status to fail. 
if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok { diff --git a/internal/moduletest/graph/plan.go b/internal/moduletest/graph/plan.go index 53d3f9407b..8386f9083c 100644 --- a/internal/moduletest/graph/plan.go +++ b/internal/moduletest/graph/plan.go @@ -32,11 +32,19 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues tfCtx, _ := terraform.NewContext(n.opts.ContextOpts) // execute the terraform plan operation - planScope, plan, planDiags := n.plan(ctx, tfCtx, setVariables, providers, mocks, waiter) + planScope, plan, originalDiags := n.plan(ctx, tfCtx, setVariables, providers, mocks, waiter) // We exclude the diagnostics that are expected to fail from the plan // diagnostics, and if an expected failure is not found, we add a new error diagnostic. - planDiags = run.ValidateExpectedFailures(planDiags) - run.Diagnostics = run.Diagnostics.Append(planDiags) + planDiags := run.ValidateExpectedFailures(originalDiags) + + if ctx.Verbose() { + // in verbose mode, we still add all the original diagnostics for + // display. + run.Diagnostics = run.Diagnostics.Append(originalDiags) + } else { + run.Diagnostics = run.Diagnostics.Append(planDiags) + } + if planDiags.HasErrors() { run.Status = moduletest.Error return