diff --git a/internal/plans/deferring/deferred.go b/internal/plans/deferring/deferred.go index a553199197..117729e1ee 100644 --- a/internal/plans/deferring/deferred.go +++ b/internal/plans/deferring/deferred.go @@ -197,6 +197,9 @@ func (d *Deferred) HaveAnyDeferrals() bool { // method will panic in that case. Callers should always test whether a resource // instance action should be deferred _before_ reporting that it has been. func (d *Deferred) ShouldDeferResourceInstanceChanges(addr addrs.AbsResourceInstance) bool { + d.mu.Lock() + defer d.mu.Unlock() + if d.externalDependencyDeferred { // This is an easy case: _all_ actions must be deferred. return true diff --git a/internal/terraform/context_apply_deferred_test.go b/internal/terraform/context_apply_deferred_test.go index f3784d45ca..9adbe36aa2 100644 --- a/internal/terraform/context_apply_deferred_test.go +++ b/internal/terraform/context_apply_deferred_test.go @@ -4,6 +4,7 @@ package terraform import ( + "encoding/json" "fmt" "sync" "testing" @@ -21,6 +22,10 @@ import ( ) type deferredActionsTest struct { + // If true, this test will be skipped. + skip bool + + // The configuration to use for this test. The keys are the filenames. configs map[string]string // The starting state for the first stage. This can be nil, and the test @@ -57,6 +62,14 @@ type deferredActionsTestStage struct { // Whether the plan should be completed during this stage. complete bool + + // Some of our tests produce expected warnings; set this to true to allow + // warnings to be present in the returned diagnostics. + allowWarnings bool + + // buildOpts is an optional field that lets the test specify additional + // options to be used when building the plan. + buildOpts func(opts *PlanOpts) } var ( @@ -116,6 +129,7 @@ output "c" { "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), }), "": cty.ObjectVal(map[string]cty.Value{ "name": cty.UnknownVal(cty.String).Refine(). 
@@ -125,10 +139,12 @@ output "c" { "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.UnknownVal(cty.String), }), "c": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("c"), "upstream_names": cty.UnknownVal(cty.Set(cty.String)).RefineNotNull(), + "output": cty.UnknownVal(cty.String), }), }, wantActions: map[string]plans.Action{ @@ -144,12 +160,14 @@ output "c" { "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), }), }, wantOutputs: map[string]cty.Value{ "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), }), // FIXME: The system is currently producing incorrect @@ -204,6 +222,7 @@ output "c" { "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), }), // test.b is now planned for real, once for each instance "b:1": cty.ObjectVal(map[string]cty.Value{ @@ -211,12 +230,14 @@ output "c" { "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.UnknownVal(cty.String), }), "b:2": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("b:2"), "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.UnknownVal(cty.String), }), // test.c gets re-planned, so we can finalize its values // based on the new results from test.b. @@ -227,6 +248,7 @@ output "c" { cty.StringVal("b:1"), cty.StringVal("b:2"), }), + "output": cty.UnknownVal(cty.String), }), }, wantActions: map[string]plans.Action{ @@ -247,12 +269,14 @@ output "c" { "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:1"), }), "b:2": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("b:2"), "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:2"), }), "c": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("c"), @@ -261,6 +285,7 @@ output "c" { cty.StringVal("b:1"), cty.StringVal("b:2"), }), + "output": cty.StringVal("c"), }), }, wantOutputs: map[string]cty.Value{ @@ -269,6 +294,7 @@ output "c" { "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), }), "b": cty.ObjectVal(map[string]cty.Value{ "1": cty.ObjectVal(map[string]cty.Value{ @@ -276,12 +302,14 @@ output "c" { "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:1"), }), "2": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("b:2"), "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:2"), }), }), "c": cty.ObjectVal(map[string]cty.Value{ @@ -291,6 +319,7 @@ output "c" { cty.StringVal("b:1"), cty.StringVal("b:2"), }), + "output": cty.StringVal("c"), }), }, complete: true, @@ -307,18 +336,21 @@ output "c" { "a": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("a"), "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), }), "b:1": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("b:1"), "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:1"), }), "b:2": cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("b:2"), "upstream_names": cty.SetVal([]cty.Value{ cty.StringVal("a"), }), + "output": cty.StringVal("b:2"), }), "c": 
cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("c"), @@ -327,6 +359,7 @@ output "c" { cty.StringVal("b:1"), cty.StringVal("b:2"), }), + "output": cty.StringVal("c"), }), }, wantActions: map[string]plans.Action{ @@ -343,130 +376,1102 @@ output "c" { }, }, } -) - -func TestContextApply_deferredActions(t *testing.T) { - tests := map[string]deferredActionsTest{ - "resource_for_each": resourceForEachTest, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - - // Initialise the context. - cfg := testModuleInline(t, test.configs) - - // Initialise the state. - state := test.state - if state == nil { - state = states.NewState() - } - // Run through our cycle of planning and applying changes, checking - // the results at each step. - for ix, stage := range test.stages { - t.Run(fmt.Sprintf("round-%d", ix), func(t *testing.T) { + resourceCountTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} - provider := &deferredActionsProvider{ - plannedChanges: &deferredActionsChanges{ - changes: make(map[string]cty.Value), - }, - appliedChanges: &deferredActionsChanges{ - changes: make(map[string]cty.Value), - }, - } +resource "test" "a" { + name = "a" +} - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(provider.Provider()), - }, - }) +resource "test" "b" { + count = var.resource_count + name = "b:${count.index}" + upstream_names = [test.a.name] +} - plan, diags := ctx.Plan(cfg, state, &PlanOpts{ - Mode: plans.NormalMode, - DeferralAllowed: true, - SetVariables: func() InputValues { - values := InputValues{} - for name, value := range stage.inputs { - values[name] = &InputValue{ - Value: value, - SourceType: ValueFromCaller, - } - } - return values - }(), - }) - if plan.Complete != stage.complete { - t.Errorf("wrong completion status in plan: got %v, want %v", plan.Complete, stage.complete) - } +resource "test" "c" { + name = "c" + upstream_names = setunion( + [for v in test.b : v.name], + [test.a.name], + ) +} +`, + }, + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.DynamicVal, + }, + wantPlanned: map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("b:"). + NotNull(). 
+ NewValue(), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + "output": cty.UnknownVal(cty.String), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.UnknownVal(cty.Set(cty.String)).RefineNotNull(), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.a": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.b[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + "test.c": plans.DeferredReasonDeferredPrereq, + }, + wantApplied: map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), + }), + }, + wantOutputs: make(map[string]cty.Value), + }, + { + inputs: map[string]cty.Value{ + "resource_count": cty.NumberIntVal(2), + }, + wantPlanned: map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), + }), + "b:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:0"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + "output": cty.UnknownVal(cty.String), + }), + "b:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + }), + "output": cty.UnknownVal(cty.String), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b:0"), + cty.StringVal("b:1"), + }), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + // Since this plan is "complete", we expect to have a planned + // action for every resource instance, although test.a is + // no-op because nothing has changed for it since last round. + `test.a`: plans.NoOp, + `test.b[0]`: plans.Create, + `test.b[1]`: plans.Create, + `test.c`: plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{}, + complete: true, + // Don't run an apply for this cycle. + }, + }, + } - // We expect the correct planned changes and no diagnostics. - assertNoDiagnostics(t, diags) - provider.plannedChanges.Test(t, stage.wantPlanned) + resourceInModuleForEachTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "each" { + type = set(string) +} - // We expect the correct actions. - gotActions := make(map[string]plans.Action) - for _, cs := range plan.Changes.Resources { - gotActions[cs.Addr.String()] = cs.Action - } - if diff := cmp.Diff(stage.wantActions, gotActions); diff != "" { - t.Errorf("wrong actions in plan\n%s", diff) - } +module "mod" { + source = "./mod" - gotDeferred := make(map[string]plans.DeferredReason) - for _, dc := range plan.DeferredResources { - gotDeferred[dc.ChangeSrc.Addr.String()] = dc.DeferredReason - } - if diff := cmp.Diff(stage.wantDeferred, gotDeferred); diff != "" { - t.Errorf("wrong deferred reasons in plan\n%s", diff) - } + each = var.each +} - if stage.wantApplied == nil { - // Don't execute the apply stage if wantApplied is nil. 
- return - } +resource "test" "a" { + name = "a" + upstream_names = module.mod.names +} +`, + "mod/main.tf": ` +variable "each" { + type = set(string) +} - updatedState, diags := ctx.Apply(plan, cfg, nil) +resource "test" "names" { + for_each = var.each + name = "b:${each.key}" +} - // We expect the correct applied changes and no diagnostics. - assertNoDiagnostics(t, diags) - provider.appliedChanges.Test(t, stage.wantApplied) +output "names" { + value = [for v in test.names : v.name] +} +`, + }, + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "each": cty.DynamicVal, + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("b:"). + NotNull(). + NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.UnknownVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{}, + wantDeferred: map[string]plans.DeferredReason{ + "module.mod.test.names[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + "test.a": plans.DeferredReasonDeferredPrereq, + }, + wantApplied: make(map[string]cty.Value), + wantOutputs: make(map[string]cty.Value), + }, + { + inputs: map[string]cty.Value{ + "each": cty.SetVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + }), + }, + wantPlanned: map[string]cty.Value{ + "b:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:1"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b:2": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:2"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.SetVal([]cty.Value{cty.StringVal("b:1"), cty.StringVal("b:2")}), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "module.mod.test.names[\"1\"]": plans.Create, + "module.mod.test.names[\"2\"]": plans.Create, + "test.a": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{}, + complete: true, + }, + }, + } - // We also want the correct output values. - gotOutputs := make(map[string]cty.Value) - for name, output := range updatedState.RootOutputValues { - gotOutputs[name] = output.Value - } - if diff := cmp.Diff(stage.wantOutputs, gotOutputs, ctydebug.CmpOptions); diff != "" { - t.Errorf("wrong output values\n%s", diff) - } + createBeforeDestroyLifecycleTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +# This resource should be replaced in the plan, with create before destroy. +resource "test" "a" { + name = "a" - // Update the state for the next stage. - state = updatedState - }) - } - }) + lifecycle { + create_before_destroy = true } } -// deferredActionsChanges is a concurrent-safe map of changes from a -// deferredActionsProvider. -type deferredActionsChanges struct { - sync.RWMutex - changes map[string]cty.Value +# This resource should be replaced in the plan, with destroy before create. 
+resource "test" "b" { + name = "b" } -func (d *deferredActionsChanges) Set(key string, value cty.Value) { - d.Lock() - defer d.Unlock() - if d.changes == nil { - d.changes = make(map[string]cty.Value) - } - d.changes[key] = value +variable "resource_count" { + type = number } -func (d *deferredActionsChanges) Get(key string) cty.Value { - d.RLock() - defer d.RUnlock() +# These resources are "maybe-orphans", we should see a generic plan action for +# these, but nothing in the actual plan. +resource "test" "c" { + count = var.resource_count + name = "c:${count.index}" + + lifecycle { + create_before_destroy = true + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.a"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, // force a replace in our plan + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "a", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.b"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, // force a replace in our plan + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "b", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.c[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, // force a replace in our plan + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "c:0", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + }), + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + wantPlanned: map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("c:"). + NotNull(). + NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.a": plans.CreateThenDelete, + "test.b": plans.DeleteThenCreate, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.c[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + }, + }, + } + + // The next test isn't testing deferred actions specifically. Instead, + // they're just testing the "removed" block works within the alternate + // execution path for deferred actions. + + forgetResourcesTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +# This should work as expected, with the resource being removed from state +# but not destroyed. This should work even with the unknown_instances experiment +# enabled. 
+removed { + from = test.a + + lifecycle { + destroy = false + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.a[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, // force a replace in our plan + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "a", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.a[1]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, // force a replace in our plan + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "a", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + }), + stages: []deferredActionsTestStage{ + { + wantPlanned: map[string]cty.Value{}, + wantActions: map[string]plans.Action{ + "test.a[0]": plans.Forget, + "test.a[1]": plans.Forget, + }, + wantDeferred: map[string]plans.DeferredReason{}, + allowWarnings: true, + complete: true, + }, + }, + } + + importIntoUnknownInstancesTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} + +resource "test" "a" { + count = var.resource_count + name = "a" +} + +import { + id = "a" + to = test.a[0] +} +`, + }, + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + wantPlanned: map[string]cty.Value{ + // This time round, we don't actually perform the import + // because we don't know which instances we're importing. + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: make(map[string]plans.Action), + wantDeferred: map[string]plans.DeferredReason{ + "test.a[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + wantApplied: make(map[string]cty.Value), + wantOutputs: make(map[string]cty.Value), + }, + { + inputs: map[string]cty.Value{ + "resource_count": cty.NumberIntVal(1), + }, + wantPlanned: map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a"), + }), + }, + wantActions: map[string]plans.Action{ + "test.a[0]": plans.NoOp, // noop not create because of the import. + }, + wantDeferred: map[string]plans.DeferredReason{}, + complete: true, + }, + }, + } + + targetDeferredResourceTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} + +resource "test" "a" { + count = var.resource_count + name = "a:${count.index}" +} + +resource "test" "b" { + name = "b" +} + +resource "test" "c" { + name = "c" +} +`, + }, + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + buildOpts: func(opts *PlanOpts) { + opts.Targets = []addrs.Targetable{mustResourceInstanceAddr("test.a[0]"), mustResourceInstanceAddr("test.b")} + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("a:"). + NotNull(). 
+ NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.b": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.a[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + allowWarnings: true, + }, + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + buildOpts: func(opts *PlanOpts) { + opts.Targets = []addrs.Targetable{mustResourceInstanceAddr("test.a"), mustResourceInstanceAddr("test.b")} + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("a:"). + NotNull(). + NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.b": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.a[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + allowWarnings: true, + }, + }, + } + + targetDeferredResourceTriggersDependenciesTest = deferredActionsTest{ + // TODO: Enable this. This test is currently disabled because we don't + // pass the deferred resources into the plan at all, which means the + // apply phase targeting doesn't correctly work out the dependencies. + // We have another ticket that will add this information to the plan + // so we should revisit this when we have that. + skip: true, // skip this until we have a better way to handle this case. + configs: map[string]string{ + "main.tf": ` +resource "test" "a" { + count = 2 + name = "a:${count.index}" +} + +resource "test" "b" { + for_each = toset([ for v in test.a : v.output ]) + name = "b:${each.value}" +} +`, + }, + stages: []deferredActionsTestStage{ + { + buildOpts: func(opts *PlanOpts) { + opts.Targets = []addrs.Targetable{mustResourceInstanceAddr("test.b")} + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("b:"). + NotNull(). 
+ NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "a:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "a:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:1"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.a[0]": plans.Create, + "test.a[1]": plans.Create, + }, + wantApplied: map[string]cty.Value{ + "a:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a:0"), + }), + "a:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:1"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a:1"), + }), + }, + wantOutputs: make(map[string]cty.Value), + allowWarnings: true, + }, + { + buildOpts: func(opts *PlanOpts) { + opts.Targets = []addrs.Targetable{mustResourceInstanceAddr("test.b")} + }, + wantPlanned: map[string]cty.Value{ + "a:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a:0"), + }), + "a:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:1"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("a:1"), + }), + "b:a:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:a:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b:a:1": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b:a:1"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.a[0]": plans.NoOp, + "test.a[1]": plans.NoOp, + "test.b[\"a:0\"]": plans.Create, + "test.b[\"a:1\"]": plans.Create, + }, + allowWarnings: true, + complete: true, + }, + }, + } + + replaceDeferredResourceTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} + +resource "test" "a" { + count = var.resource_count + name = "a:${count.index}" +} + +resource "test" "b" { + name = "b" +} + +resource "test" "c" { + name = "c" +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.a[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "a:0", + "output": "a:0", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.b"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "b", + "output": "b", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.c"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "c", + "output": "c", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + }), + stages: 
[]deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + buildOpts: func(opts *PlanOpts) { + opts.ForceReplace = []addrs.AbsResourceInstance{mustResourceInstanceAddr("test.a[0]"), mustResourceInstanceAddr("test.b")} + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("a:"). + NotNull(). + NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("c"), + }), + }, + wantActions: map[string]plans.Action{ + "test.b": plans.DeleteThenCreate, + "test.c": plans.NoOp, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.a[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + }, + }, + } + + customConditionsTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} + +resource "test" "a" { + count = var.resource_count + name = "a:${count.index}" + + lifecycle { + postcondition { + condition = self.name == "a:${count.index}" + error_message = "self.name is not a:${count.index}" + } + } +} + +resource "test" "b" { + name = "b" + + lifecycle { + postcondition { + condition = self.name == "b" + error_message = "self.name is not b" + } + } +} +`, + }, + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("a:"). + NotNull(). 
+ NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.b": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.a[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + wantApplied: map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("b"), + }), + }, + wantOutputs: make(map[string]cty.Value), + }, + { + inputs: map[string]cty.Value{ + "resource_count": cty.NumberIntVal(1), + }, + wantPlanned: map[string]cty.Value{ + "a:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("a:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("b"), + }), + }, + wantActions: map[string]plans.Action{ + "test.a[0]": plans.Create, + "test.b": plans.NoOp, + }, + wantDeferred: map[string]plans.DeferredReason{}, + complete: true, + }, + }, + } + + customConditionsWithOrphansTest = deferredActionsTest{ + configs: map[string]string{ + "main.tf": ` +variable "resource_count" { + type = number +} + +resource "test" "b" { + name = "b" + + lifecycle { + postcondition { + condition = self.name == "b" + error_message = "self.name is not b" + } + } +} + +# test.c will already be in state, so we can test the actions of orphaned +# resources with custom conditions. +resource "test" "c" { + count = var.resource_count + name = "c:${count.index}" + + lifecycle { + postcondition { + condition = self.name == "c:${count.index}" + error_message = "self.name is not c:${count.index}" + } + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.c[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "c:0", + "output": "c:0", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + state.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test.c[1]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: mustParseJson(map[string]interface{}{ + "name": "c:1", + "output": "c:1", + }), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }), + stages: []deferredActionsTestStage{ + { + inputs: map[string]cty.Value{ + "resource_count": cty.UnknownVal(cty.Number), + }, + wantPlanned: map[string]cty.Value{ + "": cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefixFull("c:"). + NotNull(). 
+ NewValue(), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.UnknownVal(cty.String), + }), + }, + wantActions: map[string]plans.Action{ + "test.b": plans.Create, + }, + wantDeferred: map[string]plans.DeferredReason{ + "test.c[\"*\"]": plans.DeferredReasonInstanceCountUnknown, + }, + wantApplied: map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("b"), + }), + }, + wantOutputs: make(map[string]cty.Value), + }, + { + inputs: map[string]cty.Value{ + "resource_count": cty.NumberIntVal(1), + }, + wantPlanned: map[string]cty.Value{ + "c:0": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("c:0"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("c:0"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("b"), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal("b"), + }), + }, + wantActions: map[string]plans.Action{ + "test.c[0]": plans.NoOp, + "test.c[1]": plans.Delete, + "test.b": plans.NoOp, + }, + wantDeferred: map[string]plans.DeferredReason{}, + complete: true, + }, + }, + } +) + +func TestContextApply_deferredActions(t *testing.T) { + tests := map[string]deferredActionsTest{ + "resource_for_each": resourceForEachTest, + "resource_in_module_for_each": resourceInModuleForEachTest, + "resource_count": resourceCountTest, + "create_before_destroy": createBeforeDestroyLifecycleTest, + "forget_resources": forgetResourcesTest, + "import_into_unknown": importIntoUnknownInstancesTest, + "target_deferred_resource": targetDeferredResourceTest, + "target_deferred_resource_triggers_dependencies": targetDeferredResourceTriggersDependenciesTest, + "replace_deferred_resource": replaceDeferredResourceTest, + "custom_conditions": customConditionsTest, + "custom_conditions_with_orphans": customConditionsWithOrphansTest, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.skip { + t.SkipNow() + } + + // Initialise the config. + cfg := testModuleInline(t, test.configs) + + // Initialise the state. + state := test.state + if state == nil { + state = states.NewState() + } + + // Run through our cycle of planning and applying changes, checking + // the results at each step. 
+ for ix, stage := range test.stages { + t.Run(fmt.Sprintf("round-%d", ix), func(t *testing.T) { + + provider := &deferredActionsProvider{ + plannedChanges: &deferredActionsChanges{ + changes: make(map[string]cty.Value), + }, + appliedChanges: &deferredActionsChanges{ + changes: make(map[string]cty.Value), + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(provider.Provider()), + }, + }) + + opts := &PlanOpts{ + Mode: plans.NormalMode, + DeferralAllowed: true, + SetVariables: func() InputValues { + values := InputValues{} + for name, value := range stage.inputs { + values[name] = &InputValue{ + Value: value, + SourceType: ValueFromCaller, + } + } + return values + }(), + } + + if stage.buildOpts != nil { + stage.buildOpts(opts) + } + + plan, diags := ctx.Plan(cfg, state, opts) + if plan.Complete != stage.complete { + t.Errorf("wrong completion status in plan: got %v, want %v", plan.Complete, stage.complete) + } + + // We expect the correct planned changes and no diagnostics. + if stage.allowWarnings { + assertNoErrors(t, diags) + } else { + assertNoDiagnostics(t, diags) + } + provider.plannedChanges.Test(t, stage.wantPlanned) + + // We expect the correct actions. + gotActions := make(map[string]plans.Action) + for _, cs := range plan.Changes.Resources { + gotActions[cs.Addr.String()] = cs.Action + } + if diff := cmp.Diff(stage.wantActions, gotActions); diff != "" { + t.Errorf("wrong actions in plan\n%s", diff) + } + + gotDeferred := make(map[string]plans.DeferredReason) + for _, dc := range plan.DeferredResources { + gotDeferred[dc.ChangeSrc.Addr.String()] = dc.DeferredReason + } + if diff := cmp.Diff(stage.wantDeferred, gotDeferred); diff != "" { + t.Errorf("wrong deferred reasons in plan\n%s", diff) + } + + if stage.wantApplied == nil { + // Don't execute the apply stage if wantApplied is nil. + return + } + + updatedState, diags := ctx.Apply(plan, cfg, nil) + + // We expect the correct applied changes and no diagnostics. + if stage.allowWarnings { + assertNoErrors(t, diags) + } else { + assertNoDiagnostics(t, diags) + } + provider.appliedChanges.Test(t, stage.wantApplied) + + // We also want the correct output values. + gotOutputs := make(map[string]cty.Value) + for name, output := range updatedState.RootOutputValues { + gotOutputs[name] = output.Value + } + if diff := cmp.Diff(stage.wantOutputs, gotOutputs, ctydebug.CmpOptions); diff != "" { + t.Errorf("wrong output values\n%s", diff) + } + + // Update the state for the next stage. + state = updatedState + }) + } + }) + } +} + +// deferredActionsChanges is a concurrent-safe map of changes from a +// deferredActionsProvider. +type deferredActionsChanges struct { + sync.RWMutex + changes map[string]cty.Value +} + +func (d *deferredActionsChanges) Set(key string, value cty.Value) { + d.Lock() + defer d.Unlock() + if d.changes == nil { + d.changes = make(map[string]cty.Value) + } + d.changes[key] = value +} + +func (d *deferredActionsChanges) Get(key string) cty.Value { + d.RLock() + defer d.RUnlock() return d.changes[key] } @@ -501,28 +1506,76 @@ func (provider *deferredActionsProvider) Provider() providers.Interface { Type: cty.Set(cty.String), Optional: true, }, + "output": { + Type: cty.String, + Computed: true, + }, }, }, }, }, }, PlanResourceChangeFn: func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + if req.ProposedNewState.IsNull() { + // Then we're deleting a concrete instance. 
+ return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + key := "" + if v := req.Config.GetAttr("name"); v.IsKnown() { + key = v.AsString() + } - provider.plannedChanges.Set(key, req.ProposedNewState) + plannedState := req.ProposedNewState + if plannedState.GetAttr("output").IsNull() { + plannedStateValues := req.ProposedNewState.AsValueMap() + plannedStateValues["output"] = cty.UnknownVal(cty.String) + plannedState = cty.ObjectVal(plannedStateValues) + } + + provider.plannedChanges.Set(key, plannedState) return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, + PlannedState: plannedState, } }, ApplyResourceChangeFn: func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { key := req.Config.GetAttr("name").AsString() - provider.appliedChanges.Set(key, req.PlannedState) + + newState := req.PlannedState + if !newState.GetAttr("output").IsKnown() { + newStateValues := req.PlannedState.AsValueMap() + newStateValues["output"] = cty.StringVal(key) + newState = cty.ObjectVal(newStateValues) + } + + provider.appliedChanges.Set(key, newState) return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, + NewState: newState, } }, + ImportResourceStateFn: func(request providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + return providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: request.TypeName, + State: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(request.ID), + "upstream_names": cty.NullVal(cty.Set(cty.String)), + "output": cty.StringVal(request.ID), + }), + }, + }, + } + }, + } +} + +func mustParseJson(values map[string]interface{}) []byte { + data, err := json.Marshal(values) + if err != nil { + panic(err) } + return data } diff --git a/internal/terraform/node_resource_partial_plan.go b/internal/terraform/node_resource_partial_plan.go new file mode 100644 index 0000000000..21774a381d --- /dev/null +++ b/internal/terraform/node_resource_partial_plan.go @@ -0,0 +1,329 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/tfdiags" ) + +// This file is a temporary split from the node_resource_plan.go file. We handle +// the unknown-instances branch of execution in here while this is still +// being developed. +// +// We have split the files to make structuring the code easier; once the +// functions within this file are production-ready, we will merge them back +// into the node_resource_plan.go file. + +// dynamicExpandPartial is a variant of dynamicExpand that we use when deferred +// actions are enabled for the current plan. +// +// Once deferred actions are more stable and robust in the stacks runtime, it +// would be nice to integrate this logic a little better with the main +// DynamicExpand logic, but it's separate for now to minimize the risk of +// stacks-specific behavior impacting configurations that are not opted into it. 
+func (n *nodeExpandPlannableResource) dynamicExpandPartial(ctx EvalContext, knownModules []addrs.ModuleInstance, partialModules addrs.Set[addrs.PartialExpandedModule], imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, tfdiags.Diagnostics) { + var g Graph + var diags tfdiags.Diagnostics + + knownResources := addrs.MakeSet[addrs.AbsResourceInstance]() + partialResources := addrs.MakeSet[addrs.PartialExpandedResource]() + maybeOrphanResources := addrs.MakeSet[addrs.AbsResourceInstance]() + + for _, moduleAddr := range knownModules { + resourceAddr := n.Addr.Resource.Absolute(moduleAddr) + resources, partials, maybeOrphans, moreDiags := n.expandKnownModule(ctx, resourceAddr, imports, &g) + diags = diags.Append(moreDiags) + + // Track all the resources we know about. + knownResources = knownResources.Union(resources) + partialResources = partialResources.Union(partials) + maybeOrphanResources = maybeOrphanResources.Union(maybeOrphans) + } + + // TODO: What about targeting and force replacement for these resources? + // For now, it actually kind of works out because we don't want to make + // any changes for these and that's what happens. Later, when we start + // tracking deferrals in the plan, we'll need to make sure that the + // targeting is applied properly. + + for _, moduleAddr := range partialModules { + resourceAddr := moduleAddr.Resource(n.Addr.Resource) + partialResources.Add(resourceAddr) + + // And add a node to the graph for this resource. + g.Add(&nodePlannablePartialExpandedResource{ + addr: resourceAddr, + config: n.Config, + resolvedProvider: n.ResolvedProvider, + skipPlanChanges: n.skipPlanChanges, + }) + } + + func() { + + ss := ctx.PrevRunState() + state := ss.Lock() + defer ss.Unlock() + + Resources: + for _, res := range state.Resources(n.Addr) { + + for _, knownModule := range knownModules { + if knownModule.Equal(res.Addr.Module) { + // Then we handled this resource as part of the known + // modules processing. + continue Resources + } + } + + for _, partialResource := range partialResources { + if partialResource.MatchesResource(res.Addr) { + + for key := range res.Instances { + // Then each of the instances is a "maybe orphan" + // instance, and we need to add a node for that. + maybeOrphanResources.Add(res.Addr.Instance(key)) + g.Add(n.concreteResource(addrs.MakeMap[addrs.AbsResourceInstance, string](), true)(NewNodeAbstractResourceInstance(res.Addr.Instance(key)))) + + } + + // Move onto the next resource. + continue Resources + } + } + + // Otherwise, everything in here is just a simple orphaned instance. + + for key := range res.Instances { + inst := res.Addr.Instance(key) + abs := NewNodeAbstractResourceInstance(inst) + abs.AttachResourceState(res) + g.Add(n.concreteResourceOrphan(abs)) + } + + } + + }() + + // We need to ensure that all of the expanded import targets are actually + // present in the configuration, because we can't import something that + // doesn't exist. + // + // See the validateExpandedImportTargets function for the equivalent of + // this for the known resources path. +ImportValidation: + for _, addr := range imports.Keys() { + if knownResources.Has(addr) { + // Simple case, this is known to be in the configuration so we + // skip it. + continue + } + + for _, partialAddr := range partialResources { + if partialAddr.MatchesInstance(addr) { + // This is a partial-expanded address, so we can't yet know + // whether it's in the configuration or not, and so we'll + // defer dealing with it to a future round. 
+ continue ImportValidation + } + } + + if maybeOrphanResources.Has(addr) { + // This is in the previous state but we can't yet know whether + // it's still desired, so we'll defer dealing with it to a future + // round. + continue + } + + // If we get here then the import target is not in the configuration + // at all, and so we'll report an error. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Configuration for import target does not exist", + fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addr), + )) + } + + // If this is a resource that participates in custom condition checks + // (i.e. it has preconditions or postconditions) then the check state + // wants to know the addresses of the checkable objects so that it can + // treat them as unknown status if we encounter an error before actually + // visiting the checks. + if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) { + checkables := addrs.MakeSet[addrs.Checkable]() + for _, addr := range knownResources { + checkables.Add(addr) + } + for _, addr := range maybeOrphanResources { + checkables.Add(addr) + } + + checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, checkables) + } + + addRootNodeToGraph(&g) + return &g, diags +} + +func (n *nodeExpandPlannableResource) expandKnownModule(globalCtx EvalContext, resAddr addrs.AbsResource, imports addrs.Map[addrs.AbsResourceInstance, string], g *Graph) (addrs.Set[addrs.AbsResourceInstance], addrs.Set[addrs.PartialExpandedResource], addrs.Set[addrs.AbsResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + moduleCtx := evalContextForModuleInstance(globalCtx, resAddr.Module) + + moreDiags := n.writeResourceState(moduleCtx, resAddr) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, nil, nil, diags + } + + expander := moduleCtx.InstanceExpander() + _, knownInstKeys, haveUnknownKeys := expander.ResourceInstanceKeys(resAddr) + + knownResources := addrs.MakeSet[addrs.AbsResourceInstance]() + partialResources := addrs.MakeSet[addrs.PartialExpandedResource]() + + for _, key := range knownInstKeys { + knownResources.Add(resAddr.Instance(key)) + } + if haveUnknownKeys { + partialResources.Add(resAddr.Module.UnexpandedResource(resAddr.Resource)) + } + + mustHaveIndex := len(knownInstKeys) != 1 || haveUnknownKeys + if len(knownInstKeys) == 1 && knownInstKeys[0] != addrs.NoKey { + mustHaveIndex = true + } + if mustHaveIndex { + var instanceAddrs []addrs.AbsResourceInstance + for _, key := range knownInstKeys { + instanceAddrs = append(instanceAddrs, resAddr.Instance(key)) + } + diags = diags.Append(n.validForceReplaceTargets(instanceAddrs)) + } + + instGraph, maybeOrphanResources, instDiags := n.knownModuleSubgraph(moduleCtx, resAddr, knownInstKeys, haveUnknownKeys, imports) + diags = diags.Append(instDiags) + if instDiags.HasErrors() { + return nil, nil, nil, diags + } + g.Subsume(&instGraph.AcyclicGraph.Graph) + return knownResources, partialResources, maybeOrphanResources, diags +} + +func (n *nodeExpandPlannableResource) knownModuleSubgraph(ctx EvalContext, addr addrs.AbsResource, knownInstKeys []addrs.InstanceKey, haveUnknownKeys bool, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, addrs.Set[addrs.AbsResourceInstance], tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if n.Config == nil && n.generateConfigPath != "" && imports.Len() == 0 { + // We're generating 
configuration, but there's nothing to import, which + // means the import block must have expanded to zero instances. + // the instance expander will always return a single instance because + // we have assumed there will eventually be a configuration for this + // resource, so return here before we add that to the graph. + return &Graph{}, nil, diags + } + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + maybeOrphans := addrs.MakeSet[addrs.AbsResourceInstance]() + + steps := []GraphTransformer{ + + DynamicTransformer(func(graph *Graph) error { + // We'll add a node for all the known instance keys. + for _, key := range knownInstKeys { + graph.Add(n.concreteResource(imports, n.skipPlanChanges)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + } + return nil + }), + + DynamicTransformer(func(graph *Graph) error { + // We'll add a node if there are unknown instance keys. + if haveUnknownKeys { + graph.Add(&nodePlannablePartialExpandedResource{ + addr: addr.Module.UnexpandedResource(addr.Resource), + config: n.Config, + resolvedProvider: n.ResolvedProvider, + skipPlanChanges: n.skipPlanChanges, + }) + } + return nil + }), + + DynamicTransformer(func(graph *Graph) error { + // We'll add nodes for any orphaned resources. + rs := state.Resource(addr) + Instances: + for key, inst := range rs.Instances { + if inst.Current == nil { + continue + } + + for _, knownKey := range knownInstKeys { + if knownKey == key { + // Then we have a known instance, so we can skip this + // one - it's definitely not an orphan. + continue Instances + } + } + + if haveUnknownKeys { + // Then this is a "maybe orphan" instance. It isn't mapped + // to a known instance but we have unknown keys so we don't + // know for sure that it's been deleted. + maybeOrphans.Add(addr.Instance(key)) + graph.Add(n.concreteResource(addrs.MakeMap[addrs.AbsResourceInstance, string](), true)(NewNodeAbstractResourceInstance(addr.Instance(key)))) + continue + } + + // If none of the above, then this is definitely an orphan. + graph.Add(n.concreteResourceOrphan(NewNodeAbstractResourceInstance(addr.Instance(key)))) + } + + return nil + }), + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + b := &BasicGraphBuilder{ + Steps: steps, + Name: "nodeExpandPlannableResource", + } + graph, graphDiags := b.Build(addr.Module) + diags = diags.Append(graphDiags) + return graph, maybeOrphans, diags +} + +// transformDynamic is a helper struct that wraps a single function, allowing +// us to transform a graph dynamically. +type transformDynamic struct { + Transformer func(*Graph) error +} + +// DynamicTransformer returns a GraphTransformer that will apply the given +// function to the graph during the dynamic expansion phase. 
+func DynamicTransformer(f func(*Graph) error) GraphTransformer { + return &transformDynamic{Transformer: f} +} + +// implements GraphTransformer +func (t *transformDynamic) Transform(g *Graph) error { + return t.Transformer(g) +} diff --git a/internal/terraform/node_resource_plan.go b/internal/terraform/node_resource_plan.go index 09c9ff88f7..875e4b7229 100644 --- a/internal/terraform/node_resource_plan.go +++ b/internal/terraform/node_resource_plan.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/dag" "github.com/hashicorp/terraform/internal/states" @@ -47,14 +48,6 @@ type nodeExpandPlannableResource struct { // structure in the future, as we need to compare for equality and take the // union of multiple groups of dependencies. dependencies []addrs.ConfigResource - - // these are a record of all the addresses used in expansion so they can be - // validated as a complete set. While the type is guaranteed to be - // addrs.AbsResourceInstance for all these, we use addrs.Checkable because - // the expandedInstances need to be passed to the check state to register - // the instances for checks. - expandedImports addrs.Set[addrs.Checkable] - expandedInstances addrs.Set[addrs.Checkable] } var ( @@ -103,11 +96,16 @@ func (n *nodeExpandPlannableResource) ModifyCreateBeforeDestroy(v bool) error { } func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tfdiags.Diagnostics) { - var g Graph - + // Expand the current module. expander := ctx.InstanceExpander() moduleInstances := expander.ExpandModule(n.Addr.Module) + // Expand the imports for this resource. + // TODO: Add support for unknown instances in import blocks. + var diags tfdiags.Diagnostics + imports, importDiags := n.expandResourceImports(ctx) + diags = diags.Append(importDiags) + // The possibility of partial-expanded modules and resources is guarded by a // top-level option for the whole plan, so that we can preserve mainline // behavior for the modules runtime. So, we currently branch off into an @@ -116,9 +114,128 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf // handle. if ctx.Deferrals().DeferralAllowed() { pem := expander.UnknownModuleInstances(n.Addr.Module) - return n.dynamicExpandWithDeferralAllowed(ctx, moduleInstances, pem) + g, expandDiags := n.dynamicExpandPartial(ctx, moduleInstances, pem, imports) + diags = diags.Append(expandDiags) + return g, diags + } + + g, expandDiags := n.dynamicExpand(ctx, moduleInstances, imports) + diags = diags.Append(expandDiags) + return g, diags +} + +// Import blocks are expanded in conjunction with their associated resource block. +func (n *nodeExpandPlannableResource) expandResourceImports(ctx EvalContext) (addrs.Map[addrs.AbsResourceInstance, string], tfdiags.Diagnostics) { + // Imports maps the target address to an import ID. 
+ imports := addrs.MakeMap[addrs.AbsResourceInstance, string]() + var diags tfdiags.Diagnostics + + if len(n.importTargets) == 0 { + return imports, diags } + // Import blocks are only valid within the root module, and must be + // evaluated within that context + ctx = evalContextForModuleInstance(ctx, addrs.RootModuleInstance) + + for _, imp := range n.importTargets { + if imp.Config == nil { + // if we have a legacy addr, it was supplied on the commandline so + // there is nothing to expand + if !imp.LegacyAddr.Equal(addrs.AbsResourceInstance{}) { + imports.Put(imp.LegacyAddr, imp.IDString) + return imports, diags + } + + // legacy import tests may have no configuration + log.Printf("[WARN] no configuration for import target %#v", imp) + continue + } + + if imp.Config.ForEach == nil { + importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if diags.HasErrors() { + return imports, diags + } + + traversal, hds := hcl.AbsTraversalForExpr(imp.Config.To) + diags = diags.Append(hds) + to, tds := addrs.ParseAbsResourceInstance(traversal) + diags = diags.Append(tds) + if diags.HasErrors() { + return imports, diags + } + + imports.Put(to, importID) + + log.Printf("[TRACE] expandResourceImports: found single import target %s", to) + continue + } + + forEachData, forEachDiags := newForEachEvaluator(imp.Config.ForEach, ctx, false).ImportValues() + diags = diags.Append(forEachDiags) + if forEachDiags.HasErrors() { + return imports, diags + } + + for _, keyData := range forEachData { + res, evalDiags := evalImportToExpression(imp.Config.To, keyData) + diags = diags.Append(evalDiags) + if diags.HasErrors() { + return imports, diags + } + + importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, keyData) + diags = diags.Append(evalDiags) + if diags.HasErrors() { + return imports, diags + } + + imports.Put(res, importID) + log.Printf("[TRACE] expandResourceImports: expanded import target %s", res) + } + } + + // filter out any import which already exist in state + state := ctx.State() + for _, el := range imports.Elements() { + if state.ResourceInstance(el.Key) != nil { + log.Printf("[DEBUG] expandResourceImports: skipping import address %s already in state", el.Key) + imports.Remove(el.Key) + } + } + + return imports, diags +} + +// validateExpandedImportTargets checks that all expanded imports correspond to +// a configured instance. +// +// This function is only called from within the dynamicExpand method, the +// import validation is inlined within the dynamicExpandPartial method for the +// alternate code path. +func (n *nodeExpandPlannableResource) validateExpandedImportTargets(expandedImports addrs.Map[addrs.AbsResourceInstance, string], expandedInstances addrs.Set[addrs.Checkable]) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for _, addr := range expandedImports.Keys() { + if !expandedInstances.Has(addr) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Configuration for import target does not exist", + fmt.Sprintf("The configuration for the given import %s does not exist. 
All target instances must have an associated configuration to be imported.", addr), + )) + return diags + } + } + + return diags +} + +func (n *nodeExpandPlannableResource) dynamicExpand(ctx EvalContext, moduleInstances []addrs.ModuleInstance, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, tfdiags.Diagnostics) { + var g Graph + var diags tfdiags.Diagnostics + // Lock the state while we inspect it state := ctx.State().Lock() @@ -143,29 +260,12 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf state = nil ctx.State().Unlock() - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - a.Dependencies = n.dependencies - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - } - } - for _, res := range orphans { for key := range res.Instances { addr := res.Addr.Instance(key) abs := NewNodeAbstractResourceInstance(addr) abs.AttachResourceState(res) - n := concreteResourceOrphan(abs) + n := n.concreteResourceOrphan(abs) g.Add(n) } } @@ -177,19 +277,21 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf // We'll gather up all of the leaf instances we learn about along the way // so that we can inform the checks subsystem of which instances it should // be expecting check results for, below. - var diags tfdiags.Diagnostics - n.expandedImports = addrs.MakeSet[addrs.Checkable]() - n.expandedInstances = addrs.MakeSet[addrs.Checkable]() + + expandedInstances := addrs.MakeSet[addrs.Checkable]() for _, module := range moduleInstances { resAddr := n.Addr.Resource.Absolute(module) - err := n.expandResourceInstances(ctx, resAddr, &g) + instances, err := n.expandResourceInstances(ctx, resAddr, imports, &g) diags = diags.Append(err) + for _, instance := range instances { + expandedInstances.Add(instance) + } } if diags.HasErrors() { return nil, diags } - diags = diags.Append(n.validateExpandedImportTargets()) + diags = diags.Append(n.validateExpandedImportTargets(imports, expandedInstances)) // If this is a resource that participates in custom condition checks // (i.e. it has preconditions or postconditions) then the check state @@ -197,7 +299,7 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf // treat them as unknown status if we encounter an error before actually // visiting the checks. if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) { - checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, n.expandedInstances) + checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, expandedInstances) } addRootNodeToGraph(&g) @@ -205,229 +307,6 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, tf return &g, diags } -// dynamicExpandWithDeferralAllowed is a variant of DynamicExpand that we use -// when deferred actions are enabled for the current plan. 
-// -// Once deferred actions are more stable and robust in the stacks runtime, it -// would be nice to integrate this logic a little better with the main -// DynamicExpand logic, but it's separate for now to minimize the risk of -// stacks-specific behavior impacting configurations that are not opted into it. -func (n *nodeExpandPlannableResource) dynamicExpandWithDeferralAllowed(globalCtx EvalContext, knownInsts []addrs.ModuleInstance, partialInsts addrs.Set[addrs.PartialExpandedModule]) (*Graph, tfdiags.Diagnostics) { - var g Graph - var diags tfdiags.Diagnostics - - // We need to resolve the expansions of the resource itself, separately - // for each of the dynamic module prefixes it appears under. - knownAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() - partialExpandedAddrs := addrs.MakeSet[addrs.PartialExpandedResource]() - for _, moduleAddr := range knownInsts { - resourceAddr := n.Addr.Resource.Absolute(moduleAddr) - // The rest of our work here needs to know which module instance it's - // working in, so that it can evaluate expressions in the appropriate scope. - moduleCtx := evalContextForModuleInstance(globalCtx, resourceAddr.Module) - - // writeResourceState calculates the dynamic expansion of the given - // resource as a side-effect, along with its other work. - moreDiags := n.writeResourceState(moduleCtx, resourceAddr) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - continue - } - - // We can now ask for all of the individual resource instances that - // we know, or for those with not-yet-known expansion. - expander := moduleCtx.InstanceExpander() - _, knownInstKeys, haveUnknownKeys := expander.ResourceInstanceKeys(resourceAddr) - for _, instKey := range knownInstKeys { - instAddr := resourceAddr.Instance(instKey) - knownAddrs.Add(instAddr) - } - if haveUnknownKeys { - partialAddr := moduleAddr.UnexpandedResource(resourceAddr.Resource) - partialExpandedAddrs.Add(partialAddr) - } - } - for _, moduleAddr := range partialInsts { - // Resources that appear under partial-expanded module prefixes are - // also partial-expanded resource addresses. - partialAddr := moduleAddr.Resource(n.Addr.Resource) - partialExpandedAddrs.Add(partialAddr) - } - // If we accumulated any error diagnostics in our work so far then - // we'll just bail out at this point. - if diags.HasErrors() { - return nil, diags - } - - // We need to search the prior state for any resource instances that - // belong to module instances that are no longer declared in the - // configuration, which is one way a resource instance can be classified - // as an "orphan". - // - // However, if any instance is under a partial-expanded prefix then - // we can't know whether it's still desired or not, and so we'll need - // to defer dealing with it to a future plan/apply round. - // - // We need to compare with the resource instances we can find in the - // state, so we'll need to briefly hold the state lock while we inspect - // those. The following inline function limits the scope of the lock. 
- orphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() - maybeOrphanAddrs := addrs.MakeSet[addrs.AbsResourceInstance]() - func() { - ss := globalCtx.PrevRunState() - state := ss.Lock() - defer ss.Unlock() - - for _, res := range state.Resources(n.Addr) { - Instances: - for instKey := range res.Instances { - instAddr := res.Addr.Instance(instKey) - - for _, partialAddr := range partialExpandedAddrs { - if partialAddr.MatchesInstance(instAddr) { - // The instance is beneath a partial-expanded prefix, so - // we can't decide yet whether it's an orphan or not, - // but we'll still note it so we can make sure to - // refresh its state. - maybeOrphanAddrs.Add(instAddr) - continue Instances - } - } - if !knownAddrs.Has(instAddr) { - // If we get here then the instance is not under an - // partial-expanded prefix and is not in our set of - // fully-known desired state instances, and so it's - // an "orphan". - orphanAddrs.Add(instAddr) - } - } - } - }() - - // TEMP: The code that deals with some other language/workflow features - // is not yet updated to be able to handle partial-expanded resource - // address prefixes, to constrain the scope of the initial experimental - // implementation. We'll reject some of those cases with errors, just to - // be explicit that they don't work rather than just quietly doing - // something incomplete/broken/strange. - if len(partialExpandedAddrs) != 0 { - // Some other parts of the system aren't yet able to make sense of - // partial-expanded resource addresses, so we'll forbid them for - // now and improve on this in later iterations of the experiment. - if len(n.Targets) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot use resource targeting with unknown count or for_each", - "In the current phase of the unknown_instances language experiment, the -target=... planning option is not yet supported whenever unknown count or for_each are present.", - )) - } - if len(n.forceReplace) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot use forced replacement with unknown count or for_each", - "In the current phase of the unknown_instances language experiment, the -replace=... planning option is not yet supported whenever unknown count or for_each are present.", - )) - } - if diags.HasErrors() { - return nil, diags - } - } - - // At this point we have four different sets of resource instance - // addresses: - // - knownAddrs are definitely in the desired state. They may or may not - // also be in the previous run state. - // - partialExpandedAddrs are unbounded sets of instances that _might_ - // be in the desired state, but we can't know until a future round. - // - orphanAddrs are in the previous run state but definitely not in - // the desired state. - // - maybeOrphanAddrs are in the previous run state and we can't know - // whether they are in the desired state until a future round. - // - // Each resource instance in the union of all of the above sets needs to - // be represented as part of _some_ graph node, but we'll build them - // differently depending on which set they came from. - for _, addr := range knownAddrs { - log.Printf("[TRACE] nodeExpandPlannableResource: %s is definitely in the desired state", addr) - v := &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - forceReplace: n.forceReplace, - // TODO: replaceTriggeredBy? - // TODO: importTarget? 
- // TODO: ForceCreateBeforeDestroy? - } - v.ResolvedProvider = n.ResolvedProvider - v.Config = n.Config - g.Add(v) - } - for _, addr := range partialExpandedAddrs { - log.Printf("[TRACE] nodeExpandPlannableResource: desired instances matching %s are not yet known", addr) - v := &nodePlannablePartialExpandedResource{ - addr: addr, - config: n.Config, - resolvedProvider: n.ResolvedProvider, - skipPlanChanges: n.skipPlanChanges, - } - g.Add(v) - } - for _, addr := range orphanAddrs { - log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but no longer desired", addr) - v := &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - // TODO: forgetResources? - // TODO: forgetModules? - } - v.ResolvedProvider = n.ResolvedProvider - v.Config = n.Config - g.Add(v) - } - for _, addr := range maybeOrphanAddrs { - // For any object in the previous run state where we cannot yet know - // if it's an orphan, we can't yet properly plan it but we still - // want to refresh it, in the same way we would if this were a - // refresh-only plan. - log.Printf("[TRACE] nodeExpandPlannableResource: %s is in previous state but unknown whether it's still desired", addr) - v := &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: NewNodeAbstractResourceInstance(addr), - skipRefresh: n.skipRefresh, - skipPlanChanges: true, // We never plan for a "maybe-orphan" - forceReplace: n.forceReplace, - // TODO: replaceTriggeredBy? - // TODO: importTarget? - // TODO: ForceCreateBeforeDestroy? - } - v.ResolvedProvider = n.ResolvedProvider - v.Config = n.Config - g.Add(v) - } - - addRootNodeToGraph(&g) - return &g, diags -} - -// validateExpandedImportTargets checks that all expanded imports correspond to -// a configured instance. -func (n *nodeExpandPlannableResource) validateExpandedImportTargets() tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for _, addr := range n.expandedImports { - if !n.expandedInstances.Has(addr) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Configuration for import target does not exist", - fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addr), - )) - return diags - } - } - - return diags -} - // expandResourceInstances calculates the dynamic expansion for the resource // itself in the context of a particular module instance. // @@ -440,7 +319,7 @@ func (n *nodeExpandPlannableResource) validateExpandedImportTargets() tfdiags.Di // within, the caller must register the final superset instAddrs with the // checks subsystem so that it knows the fully expanded set of checkable // object instances for this resource instance. 
-func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph) tfdiags.Diagnostics { +func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, imports addrs.Map[addrs.AbsResourceInstance, string], g *Graph) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics // The rest of our work here needs to know which module instance it's @@ -453,7 +332,7 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont moreDiags := n.writeResourceState(moduleCtx, resAddr) diags = diags.Append(moreDiags) if moreDiags.HasErrors() { - return diags + return nil, diags } // Before we expand our resource into potentially many resource instances, @@ -473,171 +352,30 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont mustHaveIndex = true } if mustHaveIndex { - for _, candidateAddr := range n.forceReplace { - if candidateAddr.Resource.Key == addrs.NoKey { - if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) { - switch { - case len(instanceAddrs) == 0: - // In this case there _are_ no instances to replace, so - // there isn't any alternative address for us to suggest. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.", - candidateAddr, - ), - )) - case len(instanceAddrs) == 1: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q", - candidateAddr, instanceAddrs[0], - ), - )) - default: - var possibleValidOptions strings.Builder - for _, addr := range instanceAddrs { - fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s", - candidateAddr, possibleValidOptions.String(), - ), - )) - } - } - } - } + diags = diags.Append(n.validForceReplaceTargets(instanceAddrs)) } // NOTE: The actual interpretation of n.forceReplace to produce replace // actions is in the per-instance function we're about to call, because // we need to evaluate it on a per-instance basis. - for _, addr := range instanceAddrs { - // If this resource is participating in the "checks" mechanism then our - // caller will need to know all of our expanded instance addresses as - // checkable object instances. - n.expandedInstances.Add(addr) - } - // Our graph builder mechanism expects to always be constructing new // graphs rather than adding to existing ones, so we'll first // construct a subgraph just for this individual modules's instances and // then we'll steal all of its nodes and edges to incorporate into our // main graph which contains all of the resource instances together. 
- instG, instDiags := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs) + instG, instDiags := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs, imports) if instDiags.HasErrors() { diags = diags.Append(instDiags) - return diags + return nil, diags } g.Subsume(&instG.AcyclicGraph.Graph) - return diags -} - -// Import blocks are expanded in conjunction with their associated resource block. -func (n nodeExpandPlannableResource) expandResourceImports(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (addrs.Map[addrs.AbsResourceInstance, string], tfdiags.Diagnostics) { - // Imports maps the target address to an import ID. - imports := addrs.MakeMap[addrs.AbsResourceInstance, string]() - var diags tfdiags.Diagnostics - - if len(n.importTargets) == 0 { - return imports, diags - } - - // Import blocks are only valid within the root module, and must be - // evaluated within that context - ctx = evalContextForModuleInstance(ctx, addrs.RootModuleInstance) - - for _, imp := range n.importTargets { - if imp.Config == nil { - // if we have a legacy addr, it was supplied on the commandline so - // there is nothing to expand - if !imp.LegacyAddr.Equal(addrs.AbsResourceInstance{}) { - imports.Put(imp.LegacyAddr, imp.IDString) - n.expandedImports.Add(imp.LegacyAddr) - return imports, diags - } - - // legacy import tests may have no configuration - log.Printf("[WARN] no configuration for import target %#v", imp) - continue - } - - if imp.Config.ForEach == nil { - importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if diags.HasErrors() { - return imports, diags - } - - traversal, hds := hcl.AbsTraversalForExpr(imp.Config.To) - diags = diags.Append(hds) - to, tds := addrs.ParseAbsResourceInstance(traversal) - diags = diags.Append(tds) - if diags.HasErrors() { - return imports, diags - } - - imports.Put(to, importID) - n.expandedImports.Add(to) - - log.Printf("[TRACE] expandResourceImports: found single import target %s", to) - continue - } - - forEachData, forEachDiags := newForEachEvaluator(imp.Config.ForEach, ctx, false).ImportValues() - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return imports, diags - } - - for _, keyData := range forEachData { - res, evalDiags := evalImportToExpression(imp.Config.To, keyData) - diags = diags.Append(evalDiags) - if diags.HasErrors() { - return imports, diags - } - - importID, evalDiags := evaluateImportIdExpression(imp.Config.ID, ctx, keyData) - diags = diags.Append(evalDiags) - if diags.HasErrors() { - return imports, diags - } - - imports.Put(res, importID) - n.expandedImports.Add(res) - log.Printf("[TRACE] expandResourceImports: expanded import target %s", res) - } - } - - // filter out any import which already exist in state - state := ctx.State() - for _, el := range imports.Elements() { - if state.ResourceInstance(el.Key) != nil { - log.Printf("[DEBUG] expandResourceImports: skipping import address %s already in state", el.Key) - imports.Remove(el.Key) - } - } - - return imports, diags + return instanceAddrs, diags } -func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, tfdiags.Diagnostics) { +func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance, imports addrs.Map[addrs.AbsResourceInstance, string]) (*Graph, 
tfdiags.Diagnostics) { var diags tfdiags.Diagnostics - // Now that the resources are all expanded, we can expand the imports for - // this resource. - imports, importDiags := n.expandResourceImports(ctx, addr, instanceAddrs) - diags = diags.Append(importDiags) - if n.Config == nil && n.generateConfigPath != "" && imports.Len() == 0 { // We're generating configuration, but there's nothing to import, which // means the import block must have expanded to zero instances. @@ -652,8 +390,50 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, state := ctx.State().Lock() defer ctx.State().Unlock() - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count or for_each (if present) + &ResourceCountTransformer{ + Concrete: n.concreteResource(imports, n.skipPlanChanges), + Schema: n.Schema, + Addr: n.ResourceAddr(), + InstanceAddrs: instanceAddrs, + }, + + // Add the count/for_each orphans + &OrphanResourceInstanceCountTransformer{ + Concrete: n.concreteResourceOrphan, + Addr: addr, + InstanceAddrs: instanceAddrs, + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Name: "nodeExpandPlannableResource", + } + graph, graphDiags := b.Build(addr.Module) + diags = diags.Append(graphDiags) + + return graph, diags +} + +func (n *nodeExpandPlannableResource) concreteResource(imports addrs.Map[addrs.AbsResourceInstance, string], skipPlanChanges bool) func(*NodeAbstractResourceInstance) dag.Vertex { + return func(a *NodeAbstractResourceInstance) dag.Vertex { var m *NodePlannableResourceInstance // If we're in legacy import mode (the import CLI command), we only need @@ -687,7 +467,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, // nodes that have it. 
ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, + skipPlanChanges: skipPlanChanges, forceReplace: n.forceReplace, } @@ -700,61 +480,68 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, return m } +} - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - } +func (n *nodeExpandPlannableResource) concreteResourceOrphan(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas + a.ProviderMetas = n.ProviderMetas + + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + skipRefresh: n.skipRefresh, + skipPlanChanges: n.skipPlanChanges, } +} - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Addr: n.ResourceAddr(), - InstanceAddrs: instanceAddrs, - }, - - // Add the count/for_each orphans - &OrphanResourceInstanceCountTransformer{ - Concrete: concreteResourceOrphan, - Addr: addr, - InstanceAddrs: instanceAddrs, - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, +func (n *nodeExpandPlannableResource) validForceReplaceTargets(instanceAddrs []addrs.AbsResourceInstance) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics - // Make sure there is a single root - &RootTransformer{}, - } + for _, candidateAddr := range n.forceReplace { + if candidateAddr.Resource.Key == addrs.NoKey { + if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) { + switch { + case len(instanceAddrs) == 0: + // In this case there _are_ no instances to replace, so + // there isn't any alternative address for us to suggest. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Incompletely-matched force-replace resource instance", + fmt.Sprintf( + "Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.", + candidateAddr, + ), + )) + case len(instanceAddrs) == 1: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Incompletely-matched force-replace resource instance", + fmt.Sprintf( + "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q", + candidateAddr, instanceAddrs[0], + ), + )) + default: + var possibleValidOptions strings.Builder + for _, addr := range instanceAddrs { + fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr) + } - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Name: "nodeExpandPlannableResource", + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Incompletely-matched force-replace resource instance", + fmt.Sprintf( + "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s", + candidateAddr, possibleValidOptions.String(), + ), + )) + } + } + } } - graph, graphDiags := b.Build(addr.Module) - diags = diags.Append(graphDiags) - return graph, diags + return diags }
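
The DynamicTransformer helper added at the top of this change adapts a plain function into a GraphTransformer, and resourceInstanceSubgraph now builds its per-resource graph by feeding an ordered list of such transformers to BasicGraphBuilder. Below is a minimal, self-contained sketch of those two patterns; the graph type, interface, and build helper are simplified stand-ins invented for illustration, not the real internal/terraform types.

package main

import "fmt"

// graph is a hypothetical stand-in for the real Graph type.
type graph struct {
	nodes []string
}

// graphTransformer mirrors the single-method GraphTransformer interface.
type graphTransformer interface {
	Transform(*graph) error
}

// transformFunc adapts an ordinary function into a graphTransformer, the same
// shape of adapter as the DynamicTransformer / transformDynamic helper above.
type transformFunc func(*graph) error

func (f transformFunc) Transform(g *graph) error { return f(g) }

// build applies each step in order and stops on the first error, roughly what
// BasicGraphBuilder does with its Steps slice.
func build(steps []graphTransformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	steps := []graphTransformer{
		transformFunc(func(g *graph) error {
			g.nodes = append(g.nodes, "test.a[0]") // e.g. expand instances
			return nil
		}),
		transformFunc(func(g *graph) error {
			g.nodes = append(g.nodes, "root") // e.g. add the root node
			return nil
		}),
	}
	g, err := build(steps)
	fmt.Println(g.nodes, err) // [test.a[0] root] <nil>
}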
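
expandResourceImports above expands each import block (including its for_each, when present) into a map from target instance address to import ID, drops targets already tracked in state, and validateExpandedImportTargets then requires every remaining target to have a configured instance. The sketch below walks the same flow under simplifying assumptions: plain strings stand in for addrs.AbsResourceInstance, and ordinary maps stand in for addrs.Map and the state lookup, so none of these helper names are the real API.

package main

import "fmt"

// importBlock is a simplified stand-in for an import block: a target address,
// plus either a single ID or a for_each map of instance key -> ID.
type importBlock struct {
	to      string
	id      string
	forEach map[string]string
}

// expandImports mirrors the shape of expandResourceImports above: build the
// address -> import ID map, then drop anything already tracked in state.
func expandImports(blocks []importBlock, inState map[string]bool) map[string]string {
	imports := make(map[string]string)
	for _, b := range blocks {
		if b.forEach == nil {
			imports[b.to] = b.id
			continue
		}
		for key, id := range b.forEach {
			imports[fmt.Sprintf("%s[%q]", b.to, key)] = id
		}
	}
	for addr := range imports {
		if inState[addr] {
			delete(imports, addr) // same role as the ctx.State().ResourceInstance check
		}
	}
	return imports
}

// validateImportTargets mirrors validateExpandedImportTargets: every remaining
// import target must correspond to a configured (expanded) instance.
func validateImportTargets(imports map[string]string, configured map[string]bool) error {
	for addr := range imports {
		if !configured[addr] {
			return fmt.Errorf("configuration for import target %s does not exist", addr)
		}
	}
	return nil
}

func main() {
	imports := expandImports(
		[]importBlock{{to: "test_resource.b", forEach: map[string]string{"1": "b1", "2": "b2"}}},
		map[string]bool{`test_resource.b["2"]`: true}, // already in state, so skipped
	)
	err := validateImportTargets(imports, map[string]bool{`test_resource.b["1"]`: true})
	fmt.Println(imports, err) // map[test_resource.b["1"]:b1] <nil>
}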
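
The removed dynamicExpandWithDeferralAllowed body (reworked in this change as dynamicExpandPartial) splits prior-state instances into three groups: still desired, definitely orphaned, and possibly orphaned because they fall under a partial-expanded prefix and therefore can only be refreshed until a later plan decides. A compact illustrative sketch of that classification follows, with string prefix matching standing in for PartialExpandedResource.MatchesInstance.

package main

import (
	"fmt"
	"strings"
)

// classify sorts prior-state instance addresses into orphans (no longer
// desired) and maybe-orphans (under a partial-expanded prefix, so
// undecidable until a later round).
func classify(stateAddrs, desired, partialPrefixes []string) (orphans, maybeOrphans []string) {
	known := make(map[string]bool, len(desired))
	for _, a := range desired {
		known[a] = true
	}
Instances:
	for _, addr := range stateAddrs {
		for _, prefix := range partialPrefixes {
			if strings.HasPrefix(addr, prefix) {
				// Can't tell yet whether this instance is still desired; it
				// would only be refreshed, never planned, this round.
				maybeOrphans = append(maybeOrphans, addr)
				continue Instances
			}
		}
		if !known[addr] {
			orphans = append(orphans, addr)
		}
	}
	return orphans, maybeOrphans
}

func main() {
	orphans, maybe := classify(
		[]string{"test.a[0]", "test.a[1]", `module.m["x"].test.b`}, // previous state
		[]string{"test.a[0]"}, // known desired instances
		[]string{"module.m["}, // partial-expanded prefixes
	)
	fmt.Println(orphans, maybe) // [test.a[1]] [module.m["x"].test.b]
}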