Merge pull request #36394 from hashicorp/jbardin/refresh-orphan

orphaned resources must be refreshed before plan
James Bardin 1 year ago committed by GitHub
commit 3c57915589

@@ -0,0 +1,5 @@
kind: BUG FIXES
body: Refreshed state was not used in the plan for orphaned resource instances
time: 2025-01-23T15:07:46.789595-05:00
custom:
  Issue: "36394"
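
For context on the wording above: a resource instance is "orphaned" when it is still tracked in state (and typically still exists remotely) but is no longer declared in configuration, for example because its for_each key was removed. The sketch below is only an illustration of that idea with made-up maps, not Terraform's actual orphan-detection code:

package main

import "fmt"

func main() {
	// Instance keys the configuration currently declares
	// (for_each = {} declares none).
	configKeys := map[string]bool{}

	// Instance keys recorded in state from a previous apply.
	stateKeys := []string{"old"}

	// Any state key with no matching configuration key is orphaned: the plan
	// for it is a destroy, and this fix ensures that destroy is planned from
	// the refreshed state rather than from the stale stored values.
	for _, key := range stateKeys {
		if !configKeys[key] {
			fmt.Printf("test_object.a[%q] is orphaned and will be planned for destruction\n", key)
		}
	}
}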

@@ -6315,3 +6315,47 @@ resource "aws_instance" "foo" {
t.Errorf("unexpected error message\ngot: %s\nwant substring: %s", got, want)
}
}
func TestContext2Plan_orphanUpdateInstance(t *testing.T) {
	// an orphaned instance should still reflect the refreshed state in the plan
	m := testModuleInline(t, map[string]string{
		"main.tf": `
resource "test_object" "a" {
  for_each = {}
}
`,
	})

	p := simpleMockProvider()
	p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
		state := req.PriorState.AsValueMap()
		state["test_string"] = cty.StringVal("new")
		resp.NewState = cty.ObjectVal(state)
		return resp
	}

	state := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`test_object.a["old"]`), &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{"test_string":"old"}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
	})

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	resourceType := p.GetProviderSchemaResponse.ResourceTypes["test_object"].Block.ImpliedType()
	change, err := plan.Changes.ResourceInstance(mustResourceInstanceAddr(`test_object.a["old"]`)).Decode(resourceType)
	if err != nil {
		t.Fatal(err)
	}

	if change.Before.GetAttr("test_string").AsString() != "new" {
		t.Fatalf("resource before value not refreshed in plan: %#v\n", change.Before)
	}
}

@@ -122,22 +122,6 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
 		}
 	}
 
-	shouldDefer := ctx.Deferrals().ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies)
-
-	var change *plans.ResourceInstanceChange
-	var pDiags tfdiags.Diagnostics
-	var deferred *providers.Deferred
-	if forget {
-		change, pDiags = n.planForget(ctx, oldState, "")
-		diags = diags.Append(pDiags)
-	} else {
-		change, deferred, pDiags = n.planDestroy(ctx, oldState, "")
-		diags = diags.Append(pDiags)
-	}
-	if diags.HasErrors() {
-		return diags
-	}
-
 	if !n.skipRefresh && !forget {
 		// Refresh this instance even though it is going to be destroyed, in
 		// order to catch missing resources. If this is a normal plan,
@@ -153,12 +137,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
 
 		oldState = refreshedState
 
-		if deferred == nil {
-			// set the overall deferred status if it wasn't already set.
-			deferred = refreshDeferred
-		}
-
-		if deferred == nil && !shouldDefer {
+		if refreshDeferred == nil {
 			// only update the state if we're not deferring the change
 			diags = diags.Append(n.writeResourceInstanceState(ctx, refreshedState, refreshState))
 			if diags.HasErrors() {
@@ -167,6 +146,22 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
 		}
 	}
 
+	shouldDefer := ctx.Deferrals().ShouldDeferResourceInstanceChanges(n.Addr, n.Dependencies)
+
+	var change *plans.ResourceInstanceChange
+	var pDiags tfdiags.Diagnostics
+	var deferred *providers.Deferred
+	if forget {
+		change, pDiags = n.planForget(ctx, oldState, "")
+		diags = diags.Append(pDiags)
+	} else {
+		change, deferred, pDiags = n.planDestroy(ctx, oldState, "")
+		diags = diags.Append(pDiags)
+	}
+	if diags.HasErrors() {
+		return diags
+	}
+
 	// We might be able to offer an approximate reason for why we are
 	// planning to delete this object. (This is best-effort; we might
 	// sometimes not have a reason.)
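
The net effect of the move above is an ordering change: the instance is refreshed first, and the forget/destroy change is then planned from the refreshed oldState, so the planned change's Before value reflects what the provider reported. The following standalone Go sketch (illustrative types only, not Terraform's internal API) shows why the ordering matters; it mirrors what TestContext2Plan_orphanUpdateInstance asserts:

package main

import "fmt"

// object stands in for a resource instance's attribute values.
type object map[string]string

// refresh simulates a provider ReadResource call that reports a drifted value.
func refresh(stored object) object {
	refreshed := object{}
	for k, v := range stored {
		refreshed[k] = v
	}
	refreshed["test_string"] = "new" // provider now reports "new" instead of "old"
	return refreshed
}

// change mimics a planned change, recording the "before" object.
type change struct{ Before object }

func planDestroy(old object) change { return change{Before: old} }

func main() {
	oldState := object{"test_string": "old"}

	// Old ordering (the bug): the destroy change was planned before the
	// refresh, so its Before value still showed the stale stored value.
	stale := planDestroy(oldState)

	// New ordering (this commit): refresh first, then plan the destroy
	// from the refreshed state.
	oldState = refresh(oldState)
	fresh := planDestroy(oldState)

	fmt.Println(stale.Before["test_string"]) // old
	fmt.Println(fresh.Before["test_string"]) // new
}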
