Continue test execution after an expected failure (#36447)

pull/36459/head
Samsondeen 1 year ago committed by GitHub
parent ff03b00013
commit 4161e88391

@ -0,0 +1,5 @@
kind: ENHANCEMENTS
body: Terraform Test: Continue subsequent test execution when an expected failure is not encountered.
time: 2025-02-06T16:20:53.83763+01:00
custom:
Issue: "34969"

@ -282,7 +282,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}
// walk and execute the graph
diags = runner.walkGraph(graph)
diags = runner.walkGraph(graph, file)
// If the graph walk was terminated, we don't want to add the diagnostics.
// The error the user receives will just be:
@ -298,7 +298,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}
// walkGraph goes through the graph and executes each run it finds.
func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics {
func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics {
sem := runner.Suite.semaphore
// Walk the graph.
@ -346,7 +346,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
defer sem.Release()
switch v := v.(type) {
case *graph.NodeTestRun:
case *graph.NodeTestRun: // NodeTestRun is also executable, so it has to be first.
file := v.File()
run := v.Run()
if file.GetStatus() == moduletest.Error {
@ -374,7 +374,11 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
if diags.HasErrors() {
return diags
}
// continue the execution of the test run.
startTime := time.Now().UTC()
runner.run(run, file, startTime)
runner.Suite.View.Run(run, file, moduletest.Complete, 0)
file.UpdateStatus(run.Status)
case graph.GraphNodeExecutable:
diags = v.Execute(runner.EvalContext)
return diags
@ -382,75 +386,56 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
// If the vertex isn't a test run or executable, we'll just skip it.
return
}
return
}
// We already know that the vertex is a test run
runNode := v.(*graph.NodeTestRun)
file := runNode.File()
run := runNode.Run()
key := run.GetStateKey()
if run.Config.ConfigUnderTest != nil {
if key == moduletest.MainStateIdentifier {
// This is bad. It means somehow the module we're loading has
// the same key as main state and we're about to corrupt things.
run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module source",
Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
Subject: run.Config.Module.DeclRange.Ptr(),
})
run.Status = moduletest.Error
file.UpdateStatus(moduletest.Error)
return
}
}
startTime := time.Now().UTC()
state, updatedState := runner.run(run, file, runner.EvalContext.GetFileState(key).State)
runDuration := time.Since(startTime)
if updatedState {
// Only update the most recent run and state if the state was
// actually updated by this change. We want to use the run that
// most recently updated the tracked state as the cleanup
// configuration.
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: state,
})
}
return g.AcyclicGraph.Walk(walkFn)
}
func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) {
log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
defer func() {
// If we got far enough to actually execute the run then we'll give
// the view some additional metadata about the execution.
run.ExecutionMeta = &moduletest.RunExecutionMeta{
Start: startTime,
Duration: runDuration,
Duration: time.Since(startTime),
}
runner.Suite.View.Run(run, file, moduletest.Complete, 0)
file.UpdateStatus(run.Status)
return
}
return g.AcyclicGraph.Walk(walkFn)
}
}()
func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, state *states.State) (*states.State, bool) {
log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
key := run.GetStateKey()
if run.Config.ConfigUnderTest != nil {
if key == moduletest.MainStateIdentifier {
// This is bad. It means somehow the module we're loading has
// the same key as main state and we're about to corrupt things.
run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module source",
Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
Subject: run.Config.Module.DeclRange.Ptr(),
})
run.Status = moduletest.Error
file.UpdateStatus(moduletest.Error)
return
}
}
state := runner.EvalContext.GetFileState(key).State
config := run.ModuleConfig
if runner.Suite.Cancelled {
// Don't do anything, just give up and return immediately.
// The surrounding functions should stop this even being called, but in
// case of race conditions or something we can still verify this.
return state, false
return
}
if runner.Suite.Stopped {
// Basically the same as above, except we'll be a bit nicer.
run.Status = moduletest.Skip
return state, false
return
}
start := time.Now().UTC().UnixMilli()
@ -459,35 +444,31 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(run.Config.Validate(config))
if run.Diagnostics.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
configDiags := graph.TransformConfigForTest(runner.EvalContext, run, file)
run.Diagnostics = run.Diagnostics.Append(configDiags)
if configDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
validateDiags := runner.validate(run, file, start)
run.Diagnostics = run.Diagnostics.Append(validateDiags)
if validateDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
references, referenceDiags := run.GetReferences()
run.Diagnostics = run.Diagnostics.Append(referenceDiags)
if referenceDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
}
// already validated during static analysis
references, _ := run.GetReferences()
variables, variableDiags := runner.GetVariables(run, references, true)
run.Diagnostics = run.Diagnostics.Append(variableDiags)
if variableDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
// FilterVariablesToModule only returns warnings, so we don't check the
@ -498,7 +479,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
tfCtx, ctxDiags := terraform.NewContext(runner.Suite.Opts)
run.Diagnostics = run.Diagnostics.Append(ctxDiags)
if ctxDiags.HasErrors() {
return state, false
return
}
planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start)
@ -508,7 +489,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
runner.AddVariablesToConfig(run, variables)
@ -549,8 +530,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
// Now we've successfully validated this run block, let's add it into
// our prior run outputs so future run blocks can access it.
runner.EvalContext.SetOutput(run, outputVals)
return state, false
return
}
// Otherwise any error during the planning prevents our apply from
@ -559,7 +539,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}
// Since we're carrying on and executing the apply operation as well, we're
@ -579,14 +559,16 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
applyScope, updated, applyDiags := runner.apply(tfCtx, plan, state, run, file, moduletest.Running, start, variables)
// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
applyDiags = run.ValidateExpectedFailures(applyDiags)
run.Diagnostics = run.Diagnostics.Append(applyDiags)
if applyDiags.HasErrors() {
run.Status = moduletest.Error
// We'll also update the run status based on the presence of errors or missing expected failures.
failOrErr := runner.checkForMissingExpectedFailures(run, applyDiags)
if failOrErr {
// Even though the apply operation failed, the graph may have done
// partial updates and the returned state should reflect this.
return updated, true
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: updated,
})
return
}
runner.AddVariablesToConfig(run, variables)
@ -628,7 +610,37 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
// our prior run outputs so future run blocks can access it.
runner.EvalContext.SetOutput(run, outputVals)
return updated, true
// Only update the most recent run and state if the state was
// actually updated by this change. We want to use the run that
// most recently updated the tracked state as the cleanup
// configuration.
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: updated,
})
}
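The comment above captures the invariant the runner relies on: for each state key, only a run that actually changed state overwrites the tracked entry, so the most recent state-changing run is the one whose configuration drives cleanup. A minimal, self-contained sketch of that bookkeeping, using stand-in types rather than the real graph.TestFileState:
package main
import "fmt"
// fileState is a stand-in for graph.TestFileState: the last run that changed
// a given state, plus the state it produced.
type fileState struct {
	run   string
	state string
}
func main() {
	states := map[string]*fileState{}
	// setFileState mirrors EvalContext.SetFileState: it unconditionally
	// overwrites the entry for the key with the latest run and state.
	setFileState := func(key, run, state string) {
		states[key] = &fileState{run: run, state: state}
	}
	// "setup" applies changes, so it records itself against the main key.
	setFileState("main", "setup", "state after setup")
	// A plan-only run that doesn't update state never calls setFileState,
	// so it leaves the entry alone. A later apply that changes state again
	// takes over the entry.
	setFileState("main", "second_apply", "state after second apply")
	// Cleanup would use the run recorded here.
	fmt.Println(states["main"].run) // second_apply
}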
// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics.
// It updates the run status based on the presence of errors or missing expected failures.
func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) {
// Retrieve and append diagnostics that are either unrelated to expected failures
// or report missing expected failures.
unexpectedDiags := run.ValidateExpectedFailures(diags)
run.Diagnostics = run.Diagnostics.Append(unexpectedDiags)
for _, diag := range unexpectedDiags {
// If any diagnostic indicates a missing expected failure, set the run status to fail.
if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok {
run.Status = run.Status.Merge(moduletest.Fail)
continue
}
// Upgrade the run status to error if there are still other errors in the diagnostics.
if diag.Severity() == tfdiags.Error {
run.Status = run.Status.Merge(moduletest.Error)
break
}
}
return run.Status > moduletest.Pass
}
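checkForMissingExpectedFailures leans on the run statuses being ordered by severity, so that Merge keeps the worse of two statuses and anything above Pass marks the run (and therefore the file) as failed without halting the walk. A minimal, self-contained sketch of that idea; the Status constants and Merge below are assumed stand-ins, not the real moduletest package:
package main
import "fmt"
// Status mirrors the assumed severity ordering used by the test runner:
// a larger value is a worse outcome.
type Status int
const (
	Pending Status = iota
	Skip
	Pass
	Fail
	Error
)
// Merge keeps the more severe of the two statuses.
func (s Status) Merge(other Status) Status {
	if other > s {
		return other
	}
	return s
}
func main() {
	run := Pass
	// A missing expected failure downgrades the run to Fail...
	run = run.Merge(Fail)
	// ...while any remaining unexpected error upgrades it further to Error.
	run = run.Merge(Error)
	// Anything above Pass means the file is reported as failed, but
	// subsequent run blocks can still execute.
	fmt.Println(run > Pass) // true
}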
func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics {

@ -35,7 +35,7 @@ func TestTest_Runs(t *testing.T) {
code int
initCode int
skip bool
desc string
description string
}{
"simple_pass": {
expectedOut: []string{"1 passed, 0 failed."},
@ -68,7 +68,7 @@ func TestTest_Runs(t *testing.T) {
args: []string{"-parallelism", "1"},
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
desc: "simple_pass with parallelism set to 1",
description: "simple_pass with parallelism set to 1",
},
"simple_pass_very_nested_alternate": {
override: "simple_pass_very_nested",
@ -112,10 +112,6 @@ func TestTest_Runs(t *testing.T) {
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
},
"expect_failures_outputs": {
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
},
"expect_failures_resources": {
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
@ -405,7 +401,7 @@ func TestTest_Runs(t *testing.T) {
Meta: meta,
}
code := c.Run(tc.args)
code := c.Run(append(tc.args, "-no-color"))
output := done(t)
if code != tc.code {
@ -1843,6 +1839,7 @@ the apply operation could not be executed and so the overall test case will
be marked as a failure and the original diagnostic included in the test
report.
run "no_run"... skip
input.tftest.hcl... tearing down
input.tftest.hcl... fail
output.tftest.hcl... in progress
@ -1875,7 +1872,7 @@ test report.
resource.tftest.hcl... tearing down
resource.tftest.hcl... fail
Failure! 1 passed, 3 failed.
Failure! 1 passed, 3 failed, 1 skipped.
`
actualOut := output.Stdout()
if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 {
@ -1922,6 +1919,71 @@ input must contain the character 'b'
}
}
func TestTest_MissingExpectedFailuresDuringApply(t *testing.T) {
// Test asserting that the test run fails, but does not error out, when expected failures are not encountered during apply.
// This lets subsequent runs continue to execute and the file to be marked as failed.
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "expect_failures_during_apply")), td)
defer testChdir(t, td)()
provider := testing_command.NewProvider(nil)
view, done := testView(t)
c := &TestCommand{
Meta: Meta{
testingOverrides: metaOverridesForProvider(provider.Provider),
View: view,
},
}
code := c.Run([]string{"-no-color"})
output := done(t)
if code == 0 {
t.Errorf("expected status code 0 but got %d", code)
}
expectedOut := `main.tftest.hcl... in progress
run "test"... fail
run "follow-up"... pass
Warning: Value for undeclared variable
on main.tftest.hcl line 16, in run "follow-up":
16: input = "does not matter"
The module under test does not declare a variable named "input", but it is
declared in run block "follow-up".
main.tftest.hcl... tearing down
main.tftest.hcl... fail
Failure! 1 passed, 1 failed.
`
actualOut := output.Stdout()
if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 {
t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
}
expectedErr := `
Error: Missing expected failure
on main.tftest.hcl line 7, in run "test":
7: output.output
The checkable object, output.output, was expected to report an error but did
not.
`
actualErr := output.Stderr()
if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
}
if provider.ResourceCount() > 0 {
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
}
}
func TestTest_UnknownAndNulls(t *testing.T) {
tcs := map[string]struct {

@ -0,0 +1,13 @@
locals {
input = uuid() # using uuid() to ensure that the plan phase will return an unknown value
}
output "output" {
value = local.input
precondition {
condition = local.input != ""
error_message = "this should not fail during the apply phase"
}
}

@ -0,0 +1,18 @@
run "test" {
command = apply
// We are expecting the output to fail during apply, but it will not, so the test will fail.
expect_failures = [
output.output
]
}
// this should still run
run "follow-up" {
command = apply
variables {
input = "does not matter"
}
}

@ -14,3 +14,17 @@ run "input_failure" {
]
}
// This should not run because the previous run block is expected to error, thus
// terminating the test file.
run "no_run" {
variables {
input = "abc"
}
assert {
condition = var.input == "abc"
error_message = "should not run"
}
}

@ -320,11 +320,6 @@ func diagsForEphemeralResources(refs []*addrs.Reference) (diags tfdiags.Diagnost
func (ec *EvalContext) SetFileState(key string, state *TestFileState) {
ec.stateLock.Lock()
defer ec.stateLock.Unlock()
fileState := ec.FileStates[key]
if fileState != nil {
ec.FileStates[key] = state
return
}
ec.FileStates[key] = &TestFileState{
Run: state.Run,
State: state.State,

@ -33,7 +33,7 @@ func (t *TestRunTransformer) Transform(g *terraform.Graph) error {
// Connect nodes based on dependencies
if diags := t.connectDependencies(g, nodes); diags.HasErrors() {
return tfdiags.NonFatalError{Diagnostics: diags}
return tfdiags.DiagnosticsAsError{Diagnostics: diags}
}
// Runs with the same state key inherently depend on each other, so we

@ -548,9 +548,32 @@ func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.
Summary: "Missing expected failure",
Detail: fmt.Sprintf("The checkable object, %s, was expected to report an error but did not.", addr.String()),
Subject: sourceRanges.Get(addr).ToHCL().Ptr(),
Extra: missingExpectedFailure(true),
})
}
}
return diags
}
// DiagnosticExtraFromMissingExpectedFailure provides an interface for diagnostic ExtraInfo to
// denote that a diagnostic was generated as a result of a missing expected failure.
type DiagnosticExtraFromMissingExpectedFailure interface {
DiagnosticFromMissingExpectedFailure() bool
}
// DiagnosticFromMissingExpectedFailure checks if the provided diagnostic
// is a result of a missing expected failure.
func DiagnosticFromMissingExpectedFailure(diag tfdiags.Diagnostic) bool {
maybe := tfdiags.ExtraInfo[DiagnosticExtraFromMissingExpectedFailure](diag)
if maybe == nil {
return false
}
return maybe.DiagnosticFromMissingExpectedFailure()
}
type missingExpectedFailure bool
func (missingExpectedFailure) DiagnosticFromMissingExpectedFailure() bool {
return true
}
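The Extra marker above lets callers classify a diagnostic without matching on its summary string: ValidateExpectedFailures attaches missingExpectedFailure(true) to the diagnostic, and DiagnosticFromMissingExpectedFailure asks tfdiags.ExtraInfo for it. A self-contained sketch of the same pattern, using simplified stand-ins for the tfdiags interfaces:
package main
import "fmt"
// diagnostic is a stand-in for tfdiags.Diagnostic: anything that can expose
// arbitrary extra info.
type diagnostic interface {
	ExtraInfo() interface{}
}
// missingExpectedFailure is the marker carried in a diagnostic's extra info.
type missingExpectedFailure bool
func (missingExpectedFailure) DiagnosticFromMissingExpectedFailure() bool { return true }
type simpleDiag struct {
	summary string
	extra   interface{}
}
func (d simpleDiag) ExtraInfo() interface{} { return d.extra }
// fromMissingExpectedFailure mirrors DiagnosticFromMissingExpectedFailure:
// it reports whether the diagnostic carries the marker.
func fromMissingExpectedFailure(d diagnostic) bool {
	m, ok := d.ExtraInfo().(interface {
		DiagnosticFromMissingExpectedFailure() bool
	})
	return ok && m.DiagnosticFromMissingExpectedFailure()
}
func main() {
	plain := simpleDiag{summary: "some other apply-time error"}
	missing := simpleDiag{summary: "Missing expected failure", extra: missingExpectedFailure(true)}
	fmt.Println(fromMissingExpectedFailure(plain))   // false
	fmt.Println(fromMissingExpectedFailure(missing)) // true
}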

@ -57,7 +57,7 @@ func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
diags = append(diags, ti)
case Diagnostics:
diags = append(diags, ti...) // flatten
case diagnosticsAsError:
case DiagnosticsAsError:
diags = diags.Append(ti.Diagnostics) // unwrap
case NonFatalError:
diags = diags.Append(ti.Diagnostics) // unwrap
@ -111,7 +111,7 @@ func diagnosticsForError(err error) []Diagnostic {
// If we've wrapped a Diagnostics in an error then we'll unwrap
// it and add it directly.
var asErr diagnosticsAsError
var asErr DiagnosticsAsError
if errors.As(err, &asErr) {
return asErr.Diagnostics
}
@ -206,7 +206,7 @@ func (diags Diagnostics) Err() error {
if !diags.HasErrors() {
return nil
}
return diagnosticsAsError{diags}
return DiagnosticsAsError{diags}
}
// ErrWithWarnings is similar to Err except that it will also return a non-nil
@ -257,11 +257,12 @@ func (diags Diagnostics) Sort() {
sort.Stable(sortDiagnostics(diags))
}
type diagnosticsAsError struct {
// DiagnosticsAsError embeds Diagnostics and satisfies the error interface.
type DiagnosticsAsError struct {
Diagnostics
}
func (dae diagnosticsAsError) Error() string {
func (dae DiagnosticsAsError) Error() string {
diags := dae.Diagnostics
switch {
case len(diags) == 0:
@ -291,7 +292,7 @@ func (dae diagnosticsAsError) Error() string {
// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped
// diagnostics object can be picked apart by errwrap-aware code.
func (dae diagnosticsAsError) WrappedErrors() []error {
func (dae DiagnosticsAsError) WrappedErrors() []error {
var errs []error
for _, diag := range dae.Diagnostics {
if wrapper, isErr := diag.(nativeError); isErr {
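Exporting diagnosticsAsError as DiagnosticsAsError lets code outside tfdiags, such as the test-run graph transformer above, wrap diagnostics in an error and have callers recover them later with errors.As instead of collapsing them into a single error string. A self-contained sketch of that round trip, with a stand-in type holding plain strings instead of tfdiags.Diagnostics:
package main
import (
	"errors"
	"fmt"
)
// DiagnosticsAsError is a stand-in for tfdiags.DiagnosticsAsError: an error
// wrapper that keeps the structured diagnostics it was built from.
type DiagnosticsAsError struct {
	Diagnostics []string
}
func (dae DiagnosticsAsError) Error() string {
	return fmt.Sprintf("%d problems", len(dae.Diagnostics))
}
func main() {
	// A transformer (or Diagnostics.Err) wraps diagnostics as an error...
	err := fmt.Errorf("graph walk failed: %w",
		DiagnosticsAsError{Diagnostics: []string{"Missing expected failure"}})
	// ...and a caller recovers them with errors.As, which callers in other
	// packages can only do now that the type is exported.
	var asErr DiagnosticsAsError
	if errors.As(err, &asErr) {
		fmt.Println(asErr.Diagnostics) // [Missing expected failure]
	}
}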
