terraform test: check specific dependencies before skipping run blocks (#37204)

* terraform test: check specific dependencies before skipping run blocks

* fix data race

* complete comment
pull/37211/head^2
Liam Cervante 11 months ago committed by GitHub
parent 5eabf97554
commit 063b7cf62b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -339,6 +339,11 @@ func TestTest_Runs(t *testing.T) {
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
},
"parallel-errors": {
expectedOut: []string{"1 passed, 1 failed, 1 skipped."},
expectedErr: []string{"Invalid condition run"},
code: 1,
},
}
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {

@ -0,0 +1,11 @@
# Value written into the test resource; supplied by each run block.
variable "input" {
  type = string
}

# Minimal managed resource that just stores the provided value.
resource "test_resource" "foo" {
  value = var.input
}

# Re-exposes the stored value so test run blocks can reference it
# as run.<name>.value.
output "value" {
  value = test_resource.foo.value
}

@ -0,0 +1,32 @@
# Allow independent run blocks to execute concurrently; ordering between
# dependent runs is still enforced by the test graph.
test {
  parallel = true
}

# NOTE(review): output.value is a string, not a boolean, so this condition
# is presumably expected to fail with an "Invalid condition run" error
# (matching the "parallel-errors" test case's expected output).
run "one" {
  state_key = "one"

  variables {
    input = "one"
  }

  assert {
    condition     = output.value
    error_message = "something"
  }
}

# References run.one.value, so this run depends on "one" and should be
# skipped when "one" fails.
run "two" {
  state_key = "two"

  variables {
    input = run.one.value
  }
}

# Independent of the other runs; expected to pass even though "one" fails.
run "three" {
  state_key = "three"

  variables {
    input = "three"
  }
}

@ -105,9 +105,14 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
run.Status = newStatus
run.Diagnostics = run.Diagnostics.Append(moreDiags)
// Now we've successfully validated this run block, lets add it into
// our prior run outputs so future run blocks can access it.
ctx.SetOutput(run, outputVals)
// TODO(liamcervante): Temporarily lock the eval context here, while we
// still support dynamic provider evaluations. We won't need to lock this
// anymore once we don't have to keep all run blocks in the context all the
// time.
ctx.outputsLock.Lock()
run.Outputs = outputVals
ctx.outputsLock.Unlock()
// Only update the most recent run and state if the state was
// actually updated by this change. We want to use the run that

@ -7,7 +7,6 @@ import (
"context"
"fmt"
"log"
"maps"
"sort"
"sync"
@ -44,14 +43,8 @@ type TestFileState struct {
type EvalContext struct {
VariableCache *hcltest.VariableCache
// runOutputs is a mapping from run addresses to cty object values
// representing the collected output values from the module under test.
//
// This is used to allow run blocks to refer back to the output values of
// previous run blocks. It is passed into the Evaluate functions that
// validate the test assertions, and used when calculating values for
// variables within run blocks.
runOutputs map[addrs.Run]cty.Value
// runBlocks caches all the known run blocks that this EvalContext manages.
runBlocks map[addrs.Run]*moduletest.Run
outputsLock sync.Mutex
// configProviders is a cache of config keys mapped to all the providers
@ -99,7 +92,7 @@ func NewEvalContext(opts EvalContextOpts) *EvalContext {
cancelCtx, cancel := context.WithCancel(opts.CancelCtx)
stopCtx, stop := context.WithCancel(opts.StopCtx)
return &EvalContext{
runOutputs: make(map[addrs.Run]cty.Value),
runBlocks: make(map[addrs.Run]*moduletest.Run),
outputsLock: sync.Mutex{},
configProviders: make(map[string]map[string]bool),
providersLock: sync.Mutex{},
@ -313,17 +306,28 @@ func (ec *EvalContext) EvaluateRun(run *moduletest.Run, resultScope *lang.Scope,
return status, cty.ObjectVal(outputVals), diags
}
func (ec *EvalContext) SetOutput(run *moduletest.Run, output cty.Value) {
func (ec *EvalContext) AddRunBlock(run *moduletest.Run) {
ec.outputsLock.Lock()
defer ec.outputsLock.Unlock()
ec.runOutputs[run.Addr()] = output
ec.runBlocks[run.Addr()] = run
}
func (ec *EvalContext) GetOutputs() map[addrs.Run]cty.Value {
ec.outputsLock.Lock()
defer ec.outputsLock.Unlock()
outputCopy := make(map[addrs.Run]cty.Value, len(ec.runOutputs))
maps.Copy(outputCopy, ec.runOutputs) // don't use clone here, so we can return a non-nil map
outputCopy := make(map[addrs.Run]cty.Value, len(ec.runBlocks))
for addr, run := range ec.runBlocks {
// Normally, we should check the run.Status before reading the outputs
// to make sure they are actually valid. But, for now we are tracking
// a difference between run blocks not yet executed and run blocks that
// do not exist by setting cty.NilVal for run blocks that haven't
// executed yet so we do actually just want to include all run blocks
// here.
// TODO(liamcervante): Validate run status before adding to this map
// once providers and variables are in the graph and we don't need to
// rely on this hack.
outputCopy[addr] = run.Outputs
}
return outputCopy
}
@ -378,6 +382,60 @@ func (ec *EvalContext) GetFileState(key string) *TestFileState {
return ec.FileStates[key]
}
// ReferencesCompleted returns true if all the listed references were actually
// executed successfully. This allows nodes in the graph to decide if they
// should execute or not based on the status of their references.
//
// TODO(liamcervante): Expand this with providers and variables once we've added
// them to the graph.
func (ec *EvalContext) ReferencesCompleted(refs []*addrs.Reference) bool {
	// Hold the lock for the whole scan rather than locking and unlocking
	// around each reference; this avoids the duplicated manual Unlock calls
	// on every early-return path and matches PriorRunsCompleted.
	ec.outputsLock.Lock()
	defer ec.outputsLock.Unlock()

	for _, ref := range refs {
		switch ref := ref.Subject.(type) {
		case addrs.Run:
			if run, ok := ec.runBlocks[ref]; ok {
				// Pass and Fail both mean the referenced run block actually
				// executed and wrote output data; any other status (pending,
				// skip, error) means it never produced data, so this node
				// should not execute. See also PriorRunsCompleted.
				if run.Status != moduletest.Pass && run.Status != moduletest.Fail {
					return false
				}
			}
		}
	}
	return true
}
// PriorRunsCompleted checks a list of run blocks against our internal log of
// completed run blocks and makes sure that any that do exist successfully
// executed to completion.
//
// Note that run blocks that are not in the list indicate a bad reference,
// which we ignore here. It is the caller's responsibility to identify that
// case and report it as an error.
func (ec *EvalContext) PriorRunsCompleted(runs map[addrs.Run]*moduletest.Run) bool {
	ec.outputsLock.Lock()
	defer ec.outputsLock.Unlock()
	for addr := range runs {
		if run, ok := ec.runBlocks[addr]; ok {
			if run.Status != moduletest.Pass && run.Status != moduletest.Fail {
				// pass and fail indicate the run block still executed the plan
				// or apply operation and wrote outputs. fail means the
				// post-execution checks failed, but we still had data to check.
				// this is in contrast to pending, skip, or error which indicate
				// that we never even wrote data for this run block.
				return false
			}
		}
	}
	return true
}
// evaluationData augments an underlying lang.Data -- presumably resulting
// from a terraform.Context.PlanAndEval or terraform.Context.ApplyAndEval call --
// with results from prior runs that should therefore be available when

@ -730,16 +730,21 @@ func TestEvalContext_Evaluate(t *testing.T) {
ModuleConfig: config,
}
priorOutputs := make(map[addrs.Run]cty.Value, len(test.priorOutputs))
for name, val := range test.priorOutputs {
priorOutputs[addrs.Run{Name: name}] = val
}
testCtx := NewEvalContext(EvalContextOpts{
CancelCtx: context.Background(),
StopCtx: context.Background(),
})
testCtx.runOutputs = priorOutputs
testCtx.runBlocks = make(map[addrs.Run]*moduletest.Run)
for ix, block := range file.Runs[:len(file.Runs)-1] {
// all prior run blocks we just mark as having passed, and with
// the output data specified by the test
run := moduletest.NewRun(block, config, ix)
run.Status = moduletest.Pass
run.Outputs = test.priorOutputs[run.Name]
testCtx.runBlocks[run.Addr()] = run
}
gotStatus, gotOutputs, diags := testCtx.EvaluateRun(run, planScope, test.testOnlyVars)
if got, want := gotStatus, test.expectedStatus; got != want {

@ -23,8 +23,9 @@ var (
)
type NodeTestRun struct {
run *moduletest.Run
opts *graphOptions
run *moduletest.Run
priorRuns map[addrs.Run]*moduletest.Run
opts *graphOptions
}
func (n *NodeTestRun) Run() *moduletest.Run {
@ -62,10 +63,9 @@ func (n *NodeTestRun) Execute(evalCtx *EvalContext) {
file.UpdateStatus(run.Status)
}()
if file.GetStatus() == moduletest.Error {
// If the overall test file has errored, we don't keep trying to
// execute tests. Instead, we mark all remaining run blocks as
// skipped, print the status, and move on.
if !evalCtx.PriorRunsCompleted(n.priorRuns) || !evalCtx.ReferencesCompleted(n.References()) {
// If any of our prior runs or references weren't completed successfully
// then we will just skip this run block.
run.Status = moduletest.Skip
return
}

@ -73,9 +73,14 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues
run.Status = newStatus
run.Diagnostics = run.Diagnostics.Append(moreDiags)
// Now we've successfully validated this run block, lets add it into
// our prior run outputs so future run blocks can access it.
ctx.SetOutput(run, outputVals)
// TODO(liamcervante): Temporarily lock the eval context here, while we
// still support dynamic provider evaluations. We won't need to lock this
// anymore once we don't have to keep all run blocks in the context all the
// time.
ctx.outputsLock.Lock()
run.Outputs = outputVals
ctx.outputsLock.Unlock()
}
func (n *NodeTestRun) plan(ctx *EvalContext, tfCtx *terraform.Context, variables terraform.InputValues, waiter *operationWaiter) (*lang.Scope, *plans.Plan, tfdiags.Diagnostics) {

@ -4,8 +4,6 @@
package graph
import (
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/states"
@ -31,7 +29,7 @@ func (e *EvalContextTransformer) Transform(graph *terraform.Graph) error {
// TODO(liamcervante): Once providers are embedded in the graph
// we don't need to track run blocks in this way anymore.
ctx.SetOutput(run, cty.NilVal)
ctx.AddRunBlock(run)
// We also want to set an empty state file for every state key
// we're going to be executing within the graph.

@ -4,7 +4,9 @@
package graph
import (
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/terraform"
)
@ -18,7 +20,7 @@ func (t *TestRunTransformer) Transform(g *terraform.Graph) error {
// Create and add nodes for each run
var nodes []*NodeTestRun
for _, run := range t.opts.File.Runs {
node := &NodeTestRun{run: run, opts: t.opts}
node := &NodeTestRun{run: run, opts: t.opts, priorRuns: make(map[addrs.Run]*moduletest.Run)}
g.Add(node)
nodes = append(nodes, node)
}
@ -49,6 +51,7 @@ func (t *TestRunTransformer) controlParallelism(g *terraform.Graph, nodes []*Nod
// Connect to all previous runs
for j := 0; j < i; j++ {
g.Connect(dag.BasicEdge(node, nodes[j]))
node.priorRuns[nodes[j].run.Addr()] = nodes[j].run
}
// Connect to all subsequent runs
@ -66,6 +69,7 @@ func (t *TestRunTransformer) connectSameStateRuns(g *terraform.Graph, nodes []*N
}
for _, runs := range stateRuns {
for i := 1; i < len(runs); i++ {
runs[i].priorRuns[runs[i-1].run.Addr()] = runs[i-1].run
g.Connect(dag.BasicEdge(runs[i], runs[i-1]))
}
}

@ -8,6 +8,7 @@ import (
"time"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs"
@ -39,6 +40,12 @@ type Run struct {
Index int
Status Status
// Outputs are set by the Terraform Test graph once this run block is
// executed. Callers should only access this if the Status field is set to
// Pass or Fail as both of these cases indicate the run block was executed
// successfully, and actually had values to write.
Outputs cty.Value
Diagnostics tfdiags.Diagnostics
// ExecutionMeta captures metadata about how the test run was executed.
@ -57,6 +64,8 @@ type Run struct {
func NewRun(config *configs.TestRun, moduleConfig *configs.Config, index int) *Run {
// Make a copy the module configuration variables and provider configuration maps
// so that the run can modify the map safely.
// TODO(liamcervante): We won't need to do this once variables and providers
// are executed within the graph.
newModuleConfig := *moduleConfig
if moduleConfig.Module != nil {
newModule := *moduleConfig.Module

Loading…
Cancel
Save