terraform test: don't allow diags from executable nodes (#37193)

pull/37194/head
Liam Cervante 11 months ago committed by GitHub
parent dd4bf55dca
commit 19540e30fe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -7,13 +7,12 @@ import (
"context"
"fmt"
"log"
"maps"
"path/filepath"
"slices"
"github.com/zclconf/go-cty/cty"
"maps"
"github.com/hashicorp/terraform/internal/backend/backendrun"
"github.com/hashicorp/terraform/internal/command/junit"
"github.com/hashicorp/terraform/internal/command/views"
@ -267,7 +266,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}
// walk and execute the graph
diags = runner.walkGraph(g)
diags = diags.Append(runner.walkGraph(g))
// If the graph walk was terminated, we don't want to add the diagnostics.
// The error the user receives will just be:
@ -287,7 +286,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
sem := runner.Suite.semaphore
// Walk the graph.
walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) {
walkFn := func(v dag.Vertex) tfdiags.Diagnostics {
if runner.EvalContext.Cancelled() {
// If the graph walk has been cancelled, the node should just return immediately.
// For now, this means a hard stop has been requested, in this case we don't
@ -295,7 +294,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
// just show up as pending in the printed summary. We will quickly
// just mark the overall file status as having errored to indicate
// it was interrupted.
return
return nil
}
// the walkFn is called asynchronously, and needs to be recovered
@ -312,18 +311,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
log.Printf("[ERROR] vertex %q panicked", dag.VertexName(v))
panic(r) // re-panic
}
if diags.HasErrors() {
for _, diag := range diags {
if diag.Severity() == tfdiags.Error {
desc := diag.Description()
log.Printf("[ERROR] vertex %q error: %s", dag.VertexName(v), desc.Summary)
}
}
log.Printf("[TRACE] vertex %q: visit complete, with errors", dag.VertexName(v))
} else {
log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
}
log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
}()
// Acquire a lock on the semaphore
@ -331,9 +319,9 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
defer sem.Release()
if executable, ok := v.(graph.GraphNodeExecutable); ok {
diags = executable.Execute(runner.EvalContext)
executable.Execute(runner.EvalContext)
}
return
return nil
}
return g.AcyclicGraph.Walk(walkFn)

@ -34,7 +34,7 @@ func (n *NodeStateCleanup) Name() string {
// This function should never return non-fatal error diagnostics, as that would
// prevent further cleanup from happening. Instead, the diagnostics
// will be rendered directly.
func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) {
file := n.opts.File
state := evalCtx.GetFileState(n.stateKey)
log.Printf("[TRACE] TestStateManager: cleaning up state for %s", file.Name)
@ -42,7 +42,7 @@ func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
if evalCtx.Cancelled() {
// Don't try and clean anything up if the execution has been cancelled.
log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s due to cancellation", file.Name)
return nil
return
}
empty := true
@ -61,7 +61,7 @@ func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
// The state can be empty for a run block that just executed a plan
// command, or a run block that only read data sources. We'll just
// skip empty run blocks.
return nil
return
}
if state.Run == nil {
@ -78,7 +78,7 @@ func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
evalCtx.Renderer().DestroySummary(diags, nil, file, state.State)
// intentionally return nil to allow further cleanup
return nil
return
}
TransformConfigForRun(evalCtx, state.Run, file)
@ -100,7 +100,6 @@ func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
file.UpdateStatus(moduletest.Error)
}
evalCtx.Renderer().DestroySummary(destroyDiags, state.Run, file, updated)
return nil
}
func (n *NodeStateCleanup) destroy(ctx *EvalContext, runNode *NodeTestRun, waiter *operationWaiter) (*states.State, tfdiags.Diagnostics) {

@ -44,10 +44,9 @@ func (n *NodeTestRun) References() []*addrs.Reference {
// Execute executes the test run block and update the status of the run block
// based on the result of the execution.
func (n *NodeTestRun) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
func (n *NodeTestRun) Execute(evalCtx *EvalContext) {
log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", n.File().Name, n.run.Name)
startTime := time.Now().UTC()
var diags tfdiags.Diagnostics
file, run := n.File(), n.run
// At the end of the function, we'll update the status of the file based on
@ -62,21 +61,21 @@ func (n *NodeTestRun) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
// execute tests. Instead, we mark all remaining run blocks as
// skipped, print the status, and move on.
run.Status = moduletest.Skip
return diags
return
}
if evalCtx.Cancelled() {
// A cancellation signal has been received.
// Don't do anything, just give up and return immediately.
// The surrounding functions should stop this even being called, but in
// case of race conditions or something we can still verify this.
return diags
return
}
if evalCtx.Stopped() {
// Then the test was requested to be stopped, so we just mark each
// following test as skipped, print the status, and move on.
run.Status = moduletest.Skip
return diags
return
}
// Create a waiter which handles waiting for terraform operations to complete.
@ -94,7 +93,7 @@ func (n *NodeTestRun) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
})
if cancelled {
diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
n.run.Diagnostics = n.run.Diagnostics.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
}
// If we got far enough to actually attempt to execute the run then
@ -103,7 +102,6 @@ func (n *NodeTestRun) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
Start: startTime,
Duration: time.Since(startTime),
}
return diags
}
func (n *NodeTestRun) execute(ctx *EvalContext, waiter *operationWaiter) {

@ -73,9 +73,9 @@ func validateRunConfigs(g *terraform.Graph) error {
// dynamicNode is a helper node which can be added to the graph to execute
// a dynamic function at some desired point in the graph.
type dynamicNode struct {
eval func(*EvalContext) tfdiags.Diagnostics
eval func(*EvalContext)
}
func (n *dynamicNode) Execute(evalCtx *EvalContext) tfdiags.Diagnostics {
return n.eval(evalCtx)
func (n *dynamicNode) Execute(evalCtx *EvalContext) {
n.eval(evalCtx)
}

@ -7,17 +7,17 @@ import (
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/moduletest"
hcltest "github.com/hashicorp/terraform/internal/moduletest/hcl"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
type GraphNodeExecutable interface {
Execute(ctx *EvalContext) tfdiags.Diagnostics
Execute(ctx *EvalContext)
}
// TestFileState is a helper struct that just maps a run block to the state that
@ -65,10 +65,8 @@ func (t *TestConfigTransformer) Transform(g *terraform.Graph) error {
func (t *TestConfigTransformer) addRootConfigNode(g *terraform.Graph, statesMap map[string]*TestFileState) *dynamicNode {
rootConfigNode := &dynamicNode{
eval: func(ctx *EvalContext) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
eval: func(ctx *EvalContext) {
ctx.FileStates = statesMap
return diags
},
}
g.Add(rootConfigNode)

@ -7,7 +7,6 @@ import (
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// TestProvidersTransformer is a GraphTransformer that gathers all the providers
@ -88,11 +87,10 @@ func (t *TestProvidersTransformer) transformSingleConfig(config *configs.Config)
func (t *TestProvidersTransformer) createRootNode(g *terraform.Graph, providerMap map[*NodeTestRun]map[string]bool) *dynamicNode {
node := &dynamicNode{
eval: func(ctx *EvalContext) tfdiags.Diagnostics {
eval: func(ctx *EvalContext) {
for node, providers := range providerMap {
ctx.SetProviders(node.run, providers)
}
return nil
},
}
g.Add(node)

@ -9,7 +9,6 @@ import (
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// TestStateCleanupTransformer is a GraphTransformer that adds a cleanup node
@ -75,10 +74,8 @@ func (t *TestStateCleanupTransformer) Transform(g *terraform.Graph) error {
func (t *TestStateCleanupTransformer) addRootCleanupNode(g *terraform.Graph) *dynamicNode {
rootCleanupNode := &dynamicNode{
eval: func(ctx *EvalContext) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
eval: func(ctx *EvalContext) {
ctx.Renderer().File(t.opts.File, moduletest.TearDown)
return diags
},
}
g.Add(rootCleanupNode)

Loading…
Cancel
Save