Complete functionality for the unknown instances processing path for resources (#34945)

* Expand functionality of the unknown count/for_each path when processing resources

* remove reference to experiment in tests

* fix race tests

* update tests

* address comments
pull/34971/head
Authored by Liam Cervante 2 years ago; committed via GitHub.
parent d8f22d3c54
commit cbee7469b0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -197,6 +197,9 @@ func (d *Deferred) HaveAnyDeferrals() bool {
// method will panic in that case. Callers should always test whether a resource
// instance action should be deferred _before_ reporting that it has been.
func (d *Deferred) ShouldDeferResourceInstanceChanges(addr addrs.AbsResourceInstance) bool {
d.mu.Lock()
defer d.mu.Unlock()
if d.externalDependencyDeferred {
// This is an easy case: _all_ actions must be deferred.
return true

File diff suppressed because it is too large. Select "Load Diff" to view it.

@ -0,0 +1,329 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package terraform
import (
"fmt"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// This file is a temporary split from the node_resource_plan.go file. We handle
// the unknown instances branch of execution within here, while this is still
// being developed.
//
// We have split the files to make structuring the code easier, eventually once
// the functions within this file are production ready, we will merge them back
// into the node_resource_plan.go file.
// dynamicExpandPartial is a variant of dynamicExpand that we use when deferred
// actions are enabled for the current plan.
//
// Once deferred actions are more stable and robust in the stacks runtime, it
// would be nice to integrate this logic a little better with the main
// DynamicExpand logic, but it's separate for now to minimize the risk of
// stacks-specific behavior impacting configurations that are not opted into it.
//
// knownModules are the module instances whose expansion is fully decided,
// partialModules are module address prefixes whose expansion is not yet
// known, and imports maps already-expanded import target addresses to their
// import IDs.
func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, knownModules []addrs.ModuleInstance, partialModules addrs.Set[addrs.PartialExpandedModule], imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, tfdiags.Diagnostics) {
	var g Graph
	var diags tfdiags.Diagnostics

	// Three classifications for this resource's instances:
	//   - knownResources: definitely in the desired state.
	//   - partialResources: address prefixes whose concrete instances can't
	//     be known until unknown count/for_each values are resolved.
	//   - maybeOrphanResources: present in the previous run state under a
	//     partial-expanded prefix, so possibly (but not certainly) orphaned.
	knownResources := addrs.MakeSet[addrs.AbsResourceInstance]()
	partialResources := addrs.MakeSet[addrs.PartialExpandedResource]()
	maybeOrphanResources := addrs.MakeSet[addrs.AbsResourceInstance]()

	for _, moduleAddr := range knownModules {
		resourceAddr := n.Addr.Resource.Absolute(moduleAddr)
		resources, partials, maybeOrphans, moreDiags := n.expandKnownModule(ctx, resourceAddr, imports, &g)
		diags = diags.Append(moreDiags)

		// Track all the resources we know about.
		knownResources = knownResources.Union(resources)
		partialResources = partialResources.Union(partials)
		maybeOrphanResources = maybeOrphanResources.Union(maybeOrphans)
	}

	// TODO: What about targeting and force replacement for these resources?
	// For now, it actually kind of works out because we don't want to make
	// any changes for these and that's what happens. Later, when we start
	// tracking deferrals in the plan, we'll need to make sure that the
	// targeting is applied properly.
	for _, moduleAddr := range partialModules {
		resourceAddr := moduleAddr.Resource(n.Addr.Resource)
		partialResources.Add(resourceAddr)

		// And add a node to the graph for this resource.
		g.Add(&nodePlannablePartialExpandedResource{
			addr:             resourceAddr,
			config:           n.Config,
			resolvedProvider: n.ResolvedProvider,
			skipPlanChanges:  n.skipPlanChanges,
		})
	}

	// Scan the previous run state for orphaned instances. The inline
	// function limits how long we hold the state lock.
	func() {
		ss := ctx.PrevRunState()
		state := ss.Lock()
		defer ss.Unlock()

	Resources:
		for _, res := range state.Resources(n.Addr) {
			for _, knownModule := range knownModules {
				if knownModule.Equal(res.Addr.Module) {
					// Then we handled this resource as part of the known
					// modules processing.
					continue Resources
				}
			}

			for _, partialResource := range partialResources {
				if partialResource.MatchesResource(res.Addr) {
					for key := range res.Instances {
						// Then each of the instances is a "maybe orphan"
						// instance, and we need to add a node for that.
						maybeOrphanResources.Add(res.Addr.Instance(key))
						g.Add(n.concreteResource(addrs.MakeMap[addrs.AbsResourceInstance, string](), true)(NewNodeAbstractResourceInstance(res.Addr.Instance(key))))
					}

					// Move onto the next resource.
					continue Resources
				}
			}

			// Otherwise, everything in here is just a simple orphaned instance.
			for key := range res.Instances {
				inst := res.Addr.Instance(key)
				abs := NewNodeAbstractResourceInstance(inst)
				abs.AttachResourceState(res)
				g.Add(n.concreteResourceOrphan(abs))
			}
		}
	}()

	// We need to ensure that all of the expanded import targets are actually
	// present in the configuration, because we can't import something that
	// doesn't exist.
	//
	// See the validateExpandedImportTargets function for the equivalent of
	// this for the known resources path.
ImportValidation:
	for _, addr := range imports.Keys() {
		if knownResources.Has(addr) {
			// Simple case, this is known to be in the configuration so we
			// skip it.
			continue
		}

		for _, partialAddr := range partialResources {
			if partialAddr.MatchesInstance(addr) {
				// This is a partial-expanded address, so we can't yet know
				// whether it's in the configuration or not, and so we'll
				// defer dealing with it to a future round.
				continue ImportValidation
			}
		}

		if maybeOrphanResources.Has(addr) {
			// This is in the previous state but we can't yet know whether
			// it's still desired, so we'll defer dealing with it to a future
			// round.
			continue
		}

		// If we get here then the import target is not in the configuration
		// at all, and so we'll report an error.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Configuration for import target does not exist",
			fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addr),
		))
	}

	// If this is a resource that participates in custom condition checks
	// (i.e. it has preconditions or postconditions) then the check state
	// wants to know the addresses of the checkable objects so that it can
	// treat them as unknown status if we encounter an error before actually
	// visiting the checks.
	if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
		checkables := addrs.MakeSet[addrs.Checkable]()
		for _, addr := range knownResources {
			checkables.Add(addr)
		}
		for _, addr := range maybeOrphanResources {
			checkables.Add(addr)
		}
		checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, checkables)
	}

	addRootNodeToGraph(&g)
	return &g, diags
}
// expandKnownModule handles the expansion of this resource within a single
// fully-known module instance, adding the per-instance plan nodes to g.
//
// It returns, in order: the fully-known resource instance addresses, any
// partial-expanded resource address (present when count/for_each is unknown),
// the "maybe orphan" instances found in state that can't yet be classified,
// and any diagnostics.
func (n *nodeExpandPlannableResource) expandKnownModule(globalCtx EvalContext, resAddr addrs.AbsResource, imports addrs.Map[addrs.AbsResourceInstance, string], g *Graph) (addrs.Set[addrs.AbsResourceInstance], addrs.Set[addrs.PartialExpandedResource], addrs.Set[addrs.AbsResourceInstance], tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Expression evaluation below must happen in the scope of the module
	// instance that this resource belongs to.
	moduleCtx := evalContextForModuleInstance(globalCtx, resAddr.Module)

	// writeResourceState calculates the dynamic expansion of the resource as
	// a side-effect, along with its other work.
	moreDiags := n.writeResourceState(moduleCtx, resAddr)
	diags = diags.Append(moreDiags)
	if moreDiags.HasErrors() {
		return nil, nil, nil, diags
	}

	expander := moduleCtx.InstanceExpander()
	_, knownInstKeys, haveUnknownKeys := expander.ResourceInstanceKeys(resAddr)

	knownResources := addrs.MakeSet[addrs.AbsResourceInstance]()
	partialResources := addrs.MakeSet[addrs.PartialExpandedResource]()
	for _, key := range knownInstKeys {
		knownResources.Add(resAddr.Instance(key))
	}
	if haveUnknownKeys {
		partialResources.Add(resAddr.Module.UnexpandedResource(resAddr.Resource))
	}

	// A -replace=... option must carry an instance key unless this resource
	// expands to exactly one keyless instance; when keys are required we
	// validate the force-replace targets against the known instances.
	mustHaveIndex := len(knownInstKeys) != 1 || haveUnknownKeys
	if len(knownInstKeys) == 1 && knownInstKeys[0] != addrs.NoKey {
		mustHaveIndex = true
	}
	if mustHaveIndex {
		var instanceAddrs []addrs.AbsResourceInstance
		for _, key := range knownInstKeys {
			instanceAddrs = append(instanceAddrs, resAddr.Instance(key))
		}
		diags = diags.Append(n.validForceReplaceTargets(instanceAddrs))
	}

	instGraph, maybeOrphanResources, instDiags := n.knownModuleSubgraph(moduleCtx, resAddr, knownInstKeys, haveUnknownKeys, imports)
	diags = diags.Append(instDiags)
	if instDiags.HasErrors() {
		return nil, nil, nil, diags
	}

	// Merge the per-module subgraph into the caller's main graph.
	g.Subsume(&instGraph.AcyclicGraph.Graph)
	return knownResources, partialResources, maybeOrphanResources, diags
}
// knownModuleSubgraph builds the plan subgraph for this resource within a
// single fully-known module instance: one node per known instance key, one
// partial-expanded node if there are unknown keys, and orphan (or "maybe
// orphan") nodes for anything left over in state.
//
// It returns the built subgraph and the set of "maybe orphan" instance
// addresses: instances found in state that are not known instances, but that
// can't be declared orphaned while unknown instance keys remain.
func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr addrs.AbsResource, knownInstKeys []addrs.InstanceKey, haveUnknownKeys bool, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, addrs.Set[addrs.AbsResourceInstance], tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if n.Config == nil && n.generateConfigPath != "" && imports.Len() == 0 {
		// We're generating configuration, but there's nothing to import, which
		// means the import block must have expanded to zero instances.
		// the instance expander will always return a single instance because
		// we have assumed there will eventually be a configuration for this
		// resource, so return here before we add that to the graph.
		return &Graph{}, nil, diags
	}

	// Our graph transformers require access to the full state, so we'll
	// temporarily lock it while we work on this.
	state := ctx.State().Lock()
	defer ctx.State().Unlock()

	maybeOrphans := addrs.MakeSet[addrs.AbsResourceInstance]()
	steps := []GraphTransformer{
		DynamicTransformer(func(graph *Graph) error {
			// We'll add a node for all the known instance keys.
			for _, key := range knownInstKeys {
				graph.Add(n.concreteResource(imports, n.skipPlanChanges)(NewNodeAbstractResourceInstance(addr.Instance(key))))
			}
			return nil
		}),
		DynamicTransformer(func(graph *Graph) error {
			// We'll add a node if there are unknown instance keys.
			if haveUnknownKeys {
				graph.Add(&nodePlannablePartialExpandedResource{
					addr:             addr.Module.UnexpandedResource(addr.Resource),
					config:           n.Config,
					resolvedProvider: n.ResolvedProvider,
					skipPlanChanges:  n.skipPlanChanges,
				})
			}
			return nil
		}),
		DynamicTransformer(func(graph *Graph) error {
			// We'll add nodes for any orphaned resources.
			rs := state.Resource(addr)
			if rs == nil {
				// Nothing is tracked in state for this resource at all, so
				// there can be no orphans. Without this guard we would
				// dereference a nil pointer when accessing rs.Instances.
				return nil
			}
		Instances:
			for key, inst := range rs.Instances {
				if inst.Current == nil {
					continue
				}

				for _, knownKey := range knownInstKeys {
					if knownKey == key {
						// Then we have a known instance, so we can skip this
						// one - it's definitely not an orphan.
						continue Instances
					}
				}

				if haveUnknownKeys {
					// Then this is a "maybe orphan" instance. It isn't mapped
					// to a known instance but we have unknown keys so we don't
					// know for sure that it's been deleted.
					maybeOrphans.Add(addr.Instance(key))
					graph.Add(n.concreteResource(addrs.MakeMap[addrs.AbsResourceInstance, string](), true)(NewNodeAbstractResourceInstance(addr.Instance(key))))
					continue
				}

				// If none of the above, then this is definitely an orphan.
				graph.Add(n.concreteResourceOrphan(NewNodeAbstractResourceInstance(addr.Instance(key))))
			}
			return nil
		}),

		// Attach the state
		&AttachStateTransformer{State: state},

		// Targeting
		&TargetsTransformer{Targets: n.Targets},

		// Connect references so ordering is correct
		&ReferenceTransformer{},

		// Make sure there is a single root
		&RootTransformer{},
	}

	b := &BasicGraphBuilder{
		Steps: steps,
		Name:  "nodeExpandPlannableResource",
	}
	graph, graphDiags := b.Build(addr.Module)
	diags = diags.Append(graphDiags)
	return graph, maybeOrphans, diags
}
// transformDynamic is a helper struct that wraps a single function, allowing
// us to transform a graph dynamically.
type transformDynamic struct {
	// Transformer is invoked with the graph under construction; any error
	// it returns aborts the graph build.
	Transformer func(*Graph) error
}
// DynamicTransformer returns a GraphTransformer that will apply the given
// function to the graph during the dynamic expansion phase.
func DynamicTransformer(f func(*Graph) error) GraphTransformer {
	// Wrap the callback in the private adapter type that satisfies
	// GraphTransformer.
	transformer := &transformDynamic{
		Transformer: f,
	}
	return transformer
}
// Transform implements GraphTransformer by delegating directly to the
// wrapped function.
func (t *transformDynamic) Transform(g *Graph) error {
	fn := t.Transformer
	return fn(g)
}

@ -9,6 +9,7 @@ import (
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/states"
@ -47,14 +48,6 @@ type nodeExpandPlannableResource struct {
// structure in the future, as we need to compare for equality and take the
// union of multiple groups of dependencies.
dependencies []addrs.ConfigResource
// these are a record of all the addresses used in expansion so they can be
// validated as a complete set. While the type is guaranteed to be
// addrs.AbsResourceInstance for all these, we use addrs.Checkable because
// the expandedInstances need to be passed to the check state to register
// the instances for checks.
expandedImports addrs.Set[addrs.Checkable]
expandedInstances addrs.Set[addrs.Checkable]
}
var (
@ -103,11 +96,16 @@ func (n *nodeExpandPlannableResource) ModifyCreateBeforeDestroy(v bool) error {
}
func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tfdiags.Diagnostics) {
var g Graph
// Expand the current module.
expander := ctx.InstanceExpander()
moduleInstances := expander.ExpandModule(n.Addr.Module)
// Expand the imports for this resource.
// TODO: Add support for unknown instances in import blocks.
var diags tfdiags.Diagnostics
imports, importDiags := n.expandResourceImports(ctx)
diags = diags.Append(importDiags)
// The possibility of partial-expanded modules and resources is guarded by a
// top-level option for the whole plan, so that we can preserve mainline
// behavior for the modules runtime. So, we currently branch off into an
@ -116,9 +114,128 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf
// handle.
if ctx.Deferrals().DeferralAllowed() {
pem := expander.UnknownModuleInstances(n.Addr.Module)
return n.dynamicExpandWithDeferralAllowed(ctx, moduleInstances, pem)
g, expandDiags := n.dynamicExpandPartial(ctx, moduleInstances, pem, imports)
diags = diags.Append(expandDiags)
return g, diags
}
g, expandDiags := n.dynamicExpand(ctx, moduleInstances, imports)
diags = diags.Append(expandDiags)
return g, diags
}
// Import blocks are expanded in conjunction with their associated resource block.
//
// expandResourceImports evaluates this resource's import targets into a map
// from target instance address to import ID, skipping any target that is
// already tracked in state.
func (n *nodeExpandPlannableResource) expandResourceImports(ctx EvalContext) (addrs.Map[addrs.AbsResourceInstance, string], tfdiags.Diagnostics) {
	// Imports maps the target address to an import ID.
	imports := addrs.MakeMap[addrs.AbsResourceInstance, string]()
	var diags tfdiags.Diagnostics

	if len(n.importTargets) == 0 {
		return imports, diags
	}

	// Import blocks are only valid within the root module, and must be
	// evaluated within that context
	ctx = evalContextForModuleInstance(ctx, addrs.RootModuleInstance)

	for _, imp := range n.importTargets {
		if imp.Config == nil {
			// if we have a legacy addr, it was supplied on the commandline so
			// there is nothing to expand
			if !imp.LegacyAddr.Equal(addrs.AbsResourceInstance{}) {
				imports.Put(imp.LegacyAddr, imp.IDString)
				// NOTE(review): this returns immediately, so a legacy
				// command-line import is treated as the sole target and
				// skips the already-in-state filtering below — presumably
				// handled elsewhere for the legacy path; confirm.
				return imports, diags
			}

			// legacy import tests may have no configuration
			log.Printf("[WARN] no configuration for import target %#v", imp)
			continue
		}

		if imp.Config.ForEach == nil {
			// A single-instance import block: evaluate the ID expression and
			// the static "to" address directly.
			importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, EvalDataForNoInstanceKey)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			traversal, hds := hcl.AbsTraversalForExpr(imp.Config.To)
			diags = diags.Append(hds)
			to, tds := addrs.ParseAbsResourceInstance(traversal)
			diags = diags.Append(tds)
			if diags.HasErrors() {
				return imports, diags
			}

			imports.Put(to, importID)
			log.Printf("[TRACE] expandResourceImports: found single import target %s", to)
			continue
		}

		// Otherwise the import block has its own for_each, expanding into one
		// target per for_each key.
		forEachData, forEachDiags := newForEachEvaluator(imp.Config.ForEach, ctx, false).ImportValues()
		diags = diags.Append(forEachDiags)
		if forEachDiags.HasErrors() {
			return imports, diags
		}

		for _, keyData := range forEachData {
			res, evalDiags := evalImportToExpression(imp.Config.To, keyData)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, keyData)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			imports.Put(res, importID)
			log.Printf("[TRACE] expandResourceImports: expanded import target %s", res)
		}
	}

	// filter out any import which already exist in state
	state := ctx.State()
	for _, el := range imports.Elements() {
		if state.ResourceInstance(el.Key) != nil {
			log.Printf("[DEBUG] expandResourceImports: skipping import address %s already in state", el.Key)
			imports.Remove(el.Key)
		}
	}

	return imports, diags
}
// validateExpandedImportTargets checks that all expanded imports correspond to
// a configured instance.
//
// This function is only called from within the dynamicExpand method, the
// import validation is inlined within the dynamicExpandPartial method for the
// alternate code path.
func (n *nodeExpandPlannableResource) validateExpandedImportTargets(expandedImports addrs.Map[addrs.AbsResourceInstance, string], expandedInstances addrs.Set[addrs.Checkable]) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	for _, addr := range expandedImports.Keys() {
		if !expandedInstances.Has(addr) {
			// Report every unmatched import target rather than returning on
			// the first one, so the user sees all problems at once. This
			// matches the behavior of the inline validation performed in
			// dynamicExpandPartial.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Configuration for import target does not exist",
				fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addr),
			))
		}
	}

	return diags
}
func (n *nodeExpandPlannableResource) dynamicExpand(ctx EvalContext, moduleInstances []addrs.ModuleInstance, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, tfdiags.Diagnostics) {
var g Graph
var diags tfdiags.Diagnostics
// Lock the state while we inspect it
state := ctx.State().Lock()
@ -143,29 +260,12 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf
state = nil
ctx.State().Unlock()
// The concrete resource factory we'll use for orphans
concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
a.Schema = n.Schema
a.ProvisionerSchemas = n.ProvisionerSchemas
a.ProviderMetas = n.ProviderMetas
a.Dependencies = n.dependencies
return &NodePlannableResourceInstanceOrphan{
NodeAbstractResourceInstance: a,
skipRefresh: n.skipRefresh,
skipPlanChanges: n.skipPlanChanges,
}
}
for _, res := range orphans {
for key := range res.Instances {
addr := res.Addr.Instance(key)
abs := NewNodeAbstractResourceInstance(addr)
abs.AttachResourceState(res)
n := concreteResourceOrphan(abs)
n := n.concreteResourceOrphan(abs)
g.Add(n)
}
}
@ -177,19 +277,21 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf
// We'll gather up all of the leaf instances we learn about along the way
// so that we can inform the checks subsystem of which instances it should
// be expecting check results for, below.
var diags tfdiags.Diagnostics
n.expandedImports = addrs.MakeSet[addrs.Checkable]()
n.expandedInstances = addrs.MakeSet[addrs.Checkable]()
expandedInstances := addrs.MakeSet[addrs.Checkable]()
for _, module := range moduleInstances {
resAddr := n.Addr.Resource.Absolute(module)
err := n.expandResourceInstances(ctx, resAddr, &g)
instances, err := n.expandResourceInstances(ctx, resAddr, imports, &g)
diags = diags.Append(err)
for _, instance := range instances {
expandedInstances.Add(instance)
}
}
if diags.HasErrors() {
return nil, diags
}
diags = diags.Append(n.validateExpandedImportTargets())
diags = diags.Append(n.validateExpandedImportTargets(imports, expandedInstances))
// If this is a resource that participates in custom condition checks
// (i.e. it has preconditions or postconditions) then the check state
@ -197,7 +299,7 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf
// treat them as unknown status if we encounter an error before actually
// visiting the checks.
if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, n.expandedInstances)
checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, expandedInstances)
}
addRootNodeToGraph(&g)
@ -205,229 +307,6 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf
return &g, diags
}
// dynamicExpandWithDeferralAllowed is a variant of DynamicExpand that we use
// when deferred actions are enabled for the current plan.
//
// Once deferred actions are more stable and robust in the stacks runtime, it
// would be nice to integrate this logic a little better with the main
// DynamicExpand logic, but it's separate for now to minimize the risk of
// stacks-specific behavior impacting configurations that are not opted into it.
func (n *nodeExpandPlannableResource) dynamicExpandWithDeferralAllowed(globalCtx EvalContext, knownInsts []addrs.ModuleInstance, partialInsts addrs.Set[addrs.PartialExpandedModule]) (*Graph, tfdiags.Diagnostics) {
	var g Graph
	var diags tfdiags.Diagnostics

	// We need to resolve the expansions of the resource itself, separately
	// for each of the dynamic module prefixes it appears under.
	knownAddrs := addrs.MakeSet[addrs.AbsResourceInstance]()
	partialExpandedAddrs := addrs.MakeSet[addrs.PartialExpandedResource]()
	for _, moduleAddr := range knownInsts {
		resourceAddr := n.Addr.Resource.Absolute(moduleAddr)

		// The rest of our work here needs to know which module instance it's
		// working in, so that it can evaluate expressions in the appropriate scope.
		moduleCtx := evalContextForModuleInstance(globalCtx, resourceAddr.Module)

		// writeResourceState calculates the dynamic expansion of the given
		// resource as a side-effect, along with its other work.
		moreDiags := n.writeResourceState(moduleCtx, resourceAddr)
		diags = diags.Append(moreDiags)
		if moreDiags.HasErrors() {
			continue
		}

		// We can now ask for all of the individual resource instances that
		// we know, or for those with not-yet-known expansion.
		expander := moduleCtx.InstanceExpander()
		_, knownInstKeys, haveUnknownKeys := expander.ResourceInstanceKeys(resourceAddr)
		for _, instKey := range knownInstKeys {
			instAddr := resourceAddr.Instance(instKey)
			knownAddrs.Add(instAddr)
		}
		if haveUnknownKeys {
			partialAddr := moduleAddr.UnexpandedResource(resourceAddr.Resource)
			partialExpandedAddrs.Add(partialAddr)
		}
	}
	for _, moduleAddr := range partialInsts {
		// Resources that appear under partial-expanded module prefixes are
		// also partial-expanded resource addresses.
		partialAddr := moduleAddr.Resource(n.Addr.Resource)
		partialExpandedAddrs.Add(partialAddr)
	}

	// If we accumulated any error diagnostics in our work so far then
	// we'll just bail out at this point.
	if diags.HasErrors() {
		return nil, diags
	}

	// We need to search the prior state for any resource instances that
	// belong to module instances that are no longer declared in the
	// configuration, which is one way a resource instance can be classified
	// as an "orphan".
	//
	// However, if any instance is under a partial-expanded prefix then
	// we can't know whether it's still desired or not, and so we'll need
	// to defer dealing with it to a future plan/apply round.
	//
	// We need to compare with the resource instances we can find in the
	// state, so we'll need to briefly hold the state lock while we inspect
	// those. The following inline function limits the scope of the lock.
	orphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]()
	maybeOrphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]()
	func() {
		ss := globalCtx.PrevRunState()
		state := ss.Lock()
		defer ss.Unlock()

		for _, res := range state.Resources(n.Addr) {
		Instances:
			for instKey := range res.Instances {
				instAddr := res.Addr.Instance(instKey)
				for _, partialAddr := range partialExpandedAddrs {
					if partialAddr.MatchesInstance(instAddr) {
						// The instance is beneath a partial-expanded prefix, so
						// we can't decide yet whether it's an orphan or not,
						// but we'll still note it so we can make sure to
						// refresh its state.
						maybeOrphanAddrs.Add(instAddr)
						continue Instances
					}
				}
				if !knownAddrs.Has(instAddr) {
					// If we get here then the instance is not under an
					// partial-expanded prefix and is not in our set of
					// fully-known desired state instances, and so it's
					// an "orphan".
					orphanAddrs.Add(instAddr)
				}
			}
		}
	}()

	// TEMP: The code that deals with some other language/workflow features
	// is not yet updated to be able to handle partial-expanded resource
	// address prefixes, to constrain the scope of the initial experimental
	// implementation. We'll reject some of those cases with errors, just to
	// be explicit that they don't work rather than just quietly doing
	// something incomplete/broken/strange.
	if len(partialExpandedAddrs) != 0 {
		// Some other parts of the system aren't yet able to make sense of
		// partial-expanded resource addresses, so we'll forbid them for
		// now and improve on this in later iterations of the experiment.
		if len(n.Targets) != 0 {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Cannot use resource targeting with unknown count or for_each",
				"In the current phase of the unknown_instances language experiment, the -target=... planning option is not yet supported whenever unknown count or for_each are present.",
			))
		}
		if len(n.forceReplace) != 0 {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Cannot use forced replacement with unknown count or for_each",
				"In the current phase of the unknown_instances language experiment, the -replace=... planning option is not yet supported whenever unknown count or for_each are present.",
			))
		}
		if diags.HasErrors() {
			return nil, diags
		}
	}

	// At this point we have four different sets of resource instance
	// addresses:
	// - knownAddrs are definitely in the desired state. They may or may not
	//   also be in the previous run state.
	// - partialExpandedAddrs are unbounded sets of instances that _might_
	//   be in the desired state, but we can't know until a future round.
	// - orphanAddrs are in the previous run state but definitely not in
	//   the desired state.
	// - maybeOrphanAddrs are in the previous run state and we can't know
	//   whether they are in the desired state until a future round.
	//
	// Each resource instance in the union of all of the above sets needs to
	// be represented as part of _some_ graph node, but we'll build them
	// differently depending on which set they came from.
	for _, addr := range knownAddrs {
		log.Printf("[TRACE] nodeExpandPlannableResource: %s is definitely in the desired state", addr)
		v := &NodePlannableResourceInstance{
			NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr),
			skipRefresh:                  n.skipRefresh,
			skipPlanChanges:              n.skipPlanChanges,
			forceReplace:                 n.forceReplace,
			// TODO: replaceTriggeredBy?
			// TODO: importTarget?
			// TODO: ForceCreateBeforeDestroy?
		}
		v.ResolvedProvider = n.ResolvedProvider
		v.Config = n.Config
		g.Add(v)
	}
	for _, addr := range partialExpandedAddrs {
		log.Printf("[TRACE] nodeExpandPlannableResource: desired instances matching %s are not yet known", addr)
		v := &nodePlannablePartialExpandedResource{
			addr:             addr,
			config:           n.Config,
			resolvedProvider: n.ResolvedProvider,
			skipPlanChanges:  n.skipPlanChanges,
		}
		g.Add(v)
	}
	for _, addr := range orphanAddrs {
		log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but no longer desired", addr)
		v := &NodePlannableResourceInstanceOrphan{
			NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr),
			skipRefresh:                  n.skipRefresh,
			skipPlanChanges:              n.skipPlanChanges,
			// TODO: forgetResources?
			// TODO: forgetModules?
		}
		v.ResolvedProvider = n.ResolvedProvider
		v.Config = n.Config
		g.Add(v)
	}
	for _, addr := range maybeOrphanAddrs {
		// For any object in the previous run state where we cannot yet know
		// if it's an orphan, we can't yet properly plan it but we still
		// want to refresh it, in the same way we would if this were a
		// refresh-only plan.
		log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but unknown whether it's still desired", addr)
		v := &NodePlannableResourceInstance{
			NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr),
			skipRefresh:                  n.skipRefresh,
			skipPlanChanges:              true, // We never plan for a "maybe-orphan"
			forceReplace:                 n.forceReplace,
			// TODO: replaceTriggeredBy?
			// TODO: importTarget?
			// TODO: ForceCreateBeforeDestroy?
		}
		v.ResolvedProvider = n.ResolvedProvider
		v.Config = n.Config
		g.Add(v)
	}

	addRootNodeToGraph(&g)
	return &g, diags
}
// validateExpandedImportTargets checks that all expanded imports correspond to
// a configured instance.
//
// It reads the expandedImports and expandedInstances sets accumulated on the
// receiver during expansion.
func (n *nodeExpandPlannableResource) validateExpandedImportTargets() tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	for _, addr := range n.expandedImports {
		if !n.expandedInstances.Has(addr) {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Configuration for import target does not exist",
				fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addr),
			))
			// Stops at the first unmatched target: a single error is enough
			// to abort the plan.
			return diags
		}
	}

	return diags
}
// expandResourceInstances calculates the dynamic expansion for the resource
// itself in the context of a particular module instance.
//
@ -440,7 +319,7 @@ func (n *nodeExpandPlannableResource) validateExpandedImportTargets() tfdiags.Di
// within, the caller must register the final superset instAddrs with the
// checks subsystem so that it knows the fully expanded set of checkable
// object instances for this resource instance.
func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph) tfdiags.Diagnostics {
func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, imports addrs.Map[addrs.AbsResourceInstance, string], g *Graph) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
// The rest of our work here needs to know which module instance it's
@ -453,7 +332,7 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont
moreDiags := n.writeResourceState(moduleCtx, resAddr)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
return diags
return nil, diags
}
// Before we expand our resource into potentially many resource instances,
@ -473,171 +352,30 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont
mustHaveIndex = true
}
if mustHaveIndex {
for _, candidateAddr := range n.forceReplace {
if candidateAddr.Resource.Key == addrs.NoKey {
if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) {
switch {
case len(instanceAddrs) == 0:
// In this case there _are_ no instances to replace, so
// there isn't any alternative address for us to suggest.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.",
candidateAddr,
),
))
case len(instanceAddrs) == 1:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q",
candidateAddr, instanceAddrs[0],
),
))
default:
var possibleValidOptions strings.Builder
for _, addr := range instanceAddrs {
fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr)
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s",
candidateAddr, possibleValidOptions.String(),
),
))
}
}
}
}
diags = diags.Append(n.validForceReplaceTargets(instanceAddrs))
}
// NOTE: The actual interpretation of n.forceReplace to produce replace
// actions is in the per-instance function we're about to call, because
// we need to evaluate it on a per-instance basis.
for _, addr := range instanceAddrs {
// If this resource is participating in the "checks" mechanism then our
// caller will need to know all of our expanded instance addresses as
// checkable object instances.
n.expandedInstances.Add(addr)
}
// Our graph builder mechanism expects to always be constructing new
// graphs rather than adding to existing ones, so we'll first
// construct a subgraph just for this individual modules's instances and
// then we'll steal all of its nodes and edges to incorporate into our
// main graph which contains all of the resource instances together.
instG, instDiags := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs)
instG, instDiags := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs, imports)
if instDiags.HasErrors() {
diags = diags.Append(instDiags)
return diags
return nil, diags
}
g.Subsume(&instG.AcyclicGraph.Graph)
return diags
}
// expandResourceImports expands the import blocks associated with this
// resource into a map from target resource instance address to import ID.
//
// Import blocks are expanded in conjunction with their associated resource
// block. Any import target that is already tracked in state is filtered out
// of the result, since there is nothing left to import for it.
func (n nodeExpandPlannableResource) expandResourceImports(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (addrs.Map[addrs.AbsResourceInstance, string], tfdiags.Diagnostics) {
	// Imports maps the target address to an import ID.
	imports := addrs.MakeMap[addrs.AbsResourceInstance, string]()
	var diags tfdiags.Diagnostics

	if len(n.importTargets) == 0 {
		return imports, diags
	}

	// Import blocks are only valid within the root module, and must be
	// evaluated within that context
	ctx = evalContextForModuleInstance(ctx, addrs.RootModuleInstance)

	for _, imp := range n.importTargets {
		if imp.Config == nil {
			// if we have a legacy addr, it was supplied on the commandline so
			// there is nothing to expand
			if !imp.LegacyAddr.Equal(addrs.AbsResourceInstance{}) {
				imports.Put(imp.LegacyAddr, imp.IDString)
				n.expandedImports.Add(imp.LegacyAddr)
				return imports, diags
			}

			// legacy import tests may have no configuration
			log.Printf("[WARN] no configuration for import target %#v", imp)
			continue
		}

		if imp.Config.ForEach == nil {
			// A single import block with no for_each: evaluate the ID and
			// target address once, with no instance key data.
			importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, EvalDataForNoInstanceKey)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			traversal, hds := hcl.AbsTraversalForExpr(imp.Config.To)
			diags = diags.Append(hds)
			to, tds := addrs.ParseAbsResourceInstance(traversal)
			diags = diags.Append(tds)
			if diags.HasErrors() {
				return imports, diags
			}

			imports.Put(to, importID)
			n.expandedImports.Add(to)

			log.Printf("[TRACE] expandResourceImports: found single import target %s", to)
			continue
		}

		// The import block has a for_each, so it expands into one import per
		// key; both the "to" address and the import ID may depend on each key.
		forEachData, forEachDiags := newForEachEvaluator(imp.Config.ForEach, ctx, false).ImportValues()
		diags = diags.Append(forEachDiags)
		if forEachDiags.HasErrors() {
			return imports, diags
		}

		for _, keyData := range forEachData {
			res, evalDiags := evalImportToExpression(imp.Config.To, keyData)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, keyData)
			diags = diags.Append(evalDiags)
			if diags.HasErrors() {
				return imports, diags
			}

			imports.Put(res, importID)
			n.expandedImports.Add(res)

			log.Printf("[TRACE] expandResourceImports: expanded import target %s", res)
		}
	}

	// filter out any import which already exist in state
	state := ctx.State()
	for _, el := range imports.Elements() {
		if state.ResourceInstance(el.Key) != nil {
			log.Printf("[DEBUG] expandResourceImports: skipping import address %s already in state", el.Key)
			imports.Remove(el.Key)
		}
	}

	return imports, diags
}
func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, tfdiags.Diagnostics) {
func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
// Now that the resources are all expanded, we can expand the imports for
// this resource.
imports, importDiags := n.expandResourceImports(ctx, addr, instanceAddrs)
diags = diags.Append(importDiags)
if n.Config == nil && n.generateConfigPath != "" && imports.Len() == 0 {
// We're generating configuration, but there's nothing to import, which
// means the import block must have expanded to zero instances.
@ -652,8 +390,50 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
state := ctx.State().Lock()
defer ctx.State().Unlock()
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Start creating the steps
steps := []GraphTransformer{
// Expand the count or for_each (if present)
&ResourceCountTransformer{
Concrete: n.concreteResource(imports, n.skipPlanChanges),
Schema: n.Schema,
Addr: n.ResourceAddr(),
InstanceAddrs: instanceAddrs,
},
// Add the count/for_each orphans
&OrphanResourceInstanceCountTransformer{
Concrete: n.concreteResourceOrphan,
Addr: addr,
InstanceAddrs: instanceAddrs,
State: state,
},
// Attach the state
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
// Make sure there is a single root
&RootTransformer{},
}
// Build the graph
b := &BasicGraphBuilder{
Steps: steps,
Name: "nodeExpandPlannableResource",
}
graph, graphDiags := b.Build(addr.Module)
diags = diags.Append(graphDiags)
return graph, diags
}
func (n *nodeExpandPlannableResource) concreteResource(imports addrs.Map[addrs.AbsResourceInstance, string], skipPlanChanges bool) func(*NodeAbstractResourceInstance) dag.Vertex {
return func(a *NodeAbstractResourceInstance) dag.Vertex {
var m *NodePlannableResourceInstance
// If we're in legacy import mode (the import CLI command), we only need
@ -687,7 +467,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
// nodes that have it.
ForceCreateBeforeDestroy: n.CreateBeforeDestroy(),
skipRefresh: n.skipRefresh,
skipPlanChanges: n.skipPlanChanges,
skipPlanChanges: skipPlanChanges,
forceReplace: n.forceReplace,
}
@ -700,61 +480,68 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
return m
}
}
// The concrete resource factory we'll use for orphans
concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
a.Schema = n.Schema
a.ProvisionerSchemas = n.ProvisionerSchemas
a.ProviderMetas = n.ProviderMetas
return &NodePlannableResourceInstanceOrphan{
NodeAbstractResourceInstance: a,
skipRefresh: n.skipRefresh,
skipPlanChanges: n.skipPlanChanges,
}
// concreteResourceOrphan is the concrete node factory used for count/for_each
// orphan instances: instances tracked in state that are no longer produced by
// the expanded configuration.
func (n *nodeExpandPlannableResource) concreteResourceOrphan(a *NodeAbstractResourceInstance) dag.Vertex {
	// Orphan nodes don't get config/schema attached via graph transforms, so
	// copy what we know onto the abstract node directly.
	a.Config = n.Config
	a.Schema = n.Schema
	a.ResolvedProvider = n.ResolvedProvider
	a.ProviderMetas = n.ProviderMetas
	a.ProvisionerSchemas = n.ProvisionerSchemas

	orphan := &NodePlannableResourceInstanceOrphan{
		NodeAbstractResourceInstance: a,
		skipRefresh:                  n.skipRefresh,
		skipPlanChanges:              n.skipPlanChanges,
	}
	return orphan
}
// Start creating the steps
steps := []GraphTransformer{
// Expand the count or for_each (if present)
&ResourceCountTransformer{
Concrete: concreteResource,
Schema: n.Schema,
Addr: n.ResourceAddr(),
InstanceAddrs: instanceAddrs,
},
// Add the count/for_each orphans
&OrphanResourceInstanceCountTransformer{
Concrete: concreteResourceOrphan,
Addr: addr,
InstanceAddrs: instanceAddrs,
State: state,
},
// Attach the state
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
func (n *nodeExpandPlannableResource) validForceReplaceTargets(instanceAddrs []addrs.AbsResourceInstance) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
// Make sure there is a single root
&RootTransformer{},
}
for _, candidateAddr := range n.forceReplace {
if candidateAddr.Resource.Key == addrs.NoKey {
if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) {
switch {
case len(instanceAddrs) == 0:
// In this case there _are_ no instances to replace, so
// there isn't any alternative address for us to suggest.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.",
candidateAddr,
),
))
case len(instanceAddrs) == 1:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q",
candidateAddr, instanceAddrs[0],
),
))
default:
var possibleValidOptions strings.Builder
for _, addr := range instanceAddrs {
fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr)
}
// Build the graph
b := &BasicGraphBuilder{
Steps: steps,
Name: "nodeExpandPlannableResource",
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s",
candidateAddr, possibleValidOptions.String(),
),
))
}
}
}
}
graph, graphDiags := b.Build(addr.Module)
diags = diags.Append(graphDiags)
return graph, diags
return diags
}

Loading…
Cancel
Save