Make the pre-destroy refresh a full plan

To complete the terraform destroy command, a refresh must first be done
to update the state and remove any instances which have already been
deleted externally. This was being done with a refresh-only plan, which
avoids condition evaluations and does not plan new instances. That,
however, can fail due to invalid references to resources that are
already missing from the state.
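
For reference, the previous behavior (the lines removed in the diff
below) amounted to roughly this:

refreshOpts := *opts
// Refresh-only mode does not plan new instances, so references to
// resources already missing from the state can fail to evaluate.
refreshOpts.Mode = plans.RefreshOnlyMode
refreshPlan, refreshDiags := c.refreshOnlyPlan(config, prevRunState, &refreshOpts)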

A new plan type to handle the concept of the pre-destroy refresh is
needed here, and it should probably be incorporated directly into the
destroy plan, just as the original refresh walk was incorporated into
the normal planning process. That, however, is major refactoring that is
not appropriate for a patch release.

Instead, we make two discrete changes here to prevent a destroy plan
from being blocked. The first is to use a normal plan to refresh:
missing and inconsistent instances are planned for creation or update,
so references to them can still be evaluated. That is not optimal, of
course, but it reverts to the method used by previous Terraform releases
until a better approach can be implemented.
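
Concretely, the first change (as it appears in the destroyPlan hunk
below) is to run the refresh as a normal-mode plan with the new flag
set:

refreshOpts := *opts
// Use a normal plan so that missing instances are planned for creation
// and can therefore be referenced during evaluation.
refreshOpts.Mode = plans.NormalMode
// Mark this as the pre-destroy refresh so failing condition checks are
// downgraded to warnings rather than blocking errors.
refreshOpts.PreDestroyRefresh = true
refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)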

The second change is adding a preDestroyRefresh flag to the planning
process. This is checked in every location where evalCheckRules is
called, and lets us downgrade the diagnostic severity of the output to
warnings only, matching the behavior of a refresh-only plan.
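
Each evalCheckRules call site follows the same pattern; a condensed
sketch (the exact condition and arguments vary per call site in the
hunks below):

checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges || n.preDestroyRefresh {
    // During a refresh-only plan or the pre-destroy refresh, failing
    // conditions are reported as warnings so they cannot block the
    // subsequent destroy plan.
    checkRuleSeverity = tfdiags.Warning
}
checkDiags := evalCheckRules(
    addrs.ResourcePrecondition,
    n.Config.Preconditions,
    ctx, n.Addr, keyData,
    checkRuleSeverity,
)
diags = diags.Append(checkDiags)
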
Ref: pull/32208/head
Author: James Bardin
Parent: 0c7fda1906
Commit: 3ea704ef81

@ -33,6 +33,16 @@ type PlanOpts struct {
// instance using its corresponding provider.
SkipRefresh bool
// PreDestroyRefresh indicates that this is being passed to a plan used to
// refresh the state immediately before a destroy plan.
// FIXME: This is a temporary fix to allow the pre-destroy refresh to
// succeed. The refreshing operation during destroy must be a special case,
// which can allow for missing instances in the state, and avoid blocking
// on failing condition tests. The destroy plan itself should be
// responsible for this special case of refreshing, and the separate
// pre-destroy plan removed entirely.
PreDestroyRefresh bool
// SetVariables are the raw values for root module variables as provided
// by the user who is requesting the run, prior to any normalization or
// substitution of defaults. See the documentation for the InputValue
@ -337,8 +347,16 @@ func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State
if !opts.SkipRefresh && !prevRunState.Empty() {
log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
refreshOpts := *opts
refreshOpts.Mode = plans.RefreshOnlyMode
refreshPlan, refreshDiags := c.refreshOnlyPlan(config, prevRunState, &refreshOpts)
refreshOpts.Mode = plans.NormalMode
refreshOpts.PreDestroyRefresh = true
// FIXME: A normal plan is required here to refresh the state, because
// the state and configuration may not match during a destroy, and a
// normal refresh plan can fail with evaluation errors. In the future
// the destroy plan should take care of refreshing instances itself,
// where the special cases of evaluation and skipping condition checks
// can be done.
refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
if refreshDiags.HasErrors() {
// NOTE: Normally we'd append diagnostics regardless of whether
// there are errors, just in case there are warnings we'd want to
@ -558,6 +576,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State,
Targets: opts.Targets,
ForceReplace: opts.ForceReplace,
skipRefresh: opts.SkipRefresh,
preDestroyRefresh: opts.PreDestroyRefresh,
Operation: walkPlan,
}).Build(addrs.RootModuleInstance)
return graph, walkPlan, diags

@ -3827,3 +3827,103 @@ resource "test_object" "b" {
_, diags = ctx.Plan(m, state, opts)
assertNoErrors(t, diags)
}
func TestContext2Plan_destroyPartialState(t *testing.T) {
m := testModuleInline(t, map[string]string{
"main.tf": `
resource "test_object" "a" {
}
output "out" {
value = module.mod.out
}
module "mod" {
source = "./mod"
}
`,
"./mod/main.tf": `
resource "test_object" "a" {
count = 2
lifecycle {
precondition {
# test_object.b has already been destroyed, so referencing the first
# instance must not fail during a destroy plan.
condition = test_object.b[0].test_string == "invalid"
error_message = "should not block destroy"
}
precondition {
# this failing condition should not block a destroy plan
condition = !local.continue
error_message = "should not block destroy"
}
}
}
resource "test_object" "b" {
count = 2
}
locals {
continue = true
}
output "out" {
# the reference to test_object.b[0] may not be valid during a destroy plan,
# but should not fail.
value = local.continue ? test_object.a[1].test_string != "invalid" && test_object.b[0].test_string != "invalid" : false
precondition {
# test_object.b has already been destroyed, so referencing the first
# instance must not fail during a destroy plan.
condition = test_object.b[0].test_string == "invalid"
error_message = "should not block destroy"
}
precondition {
# this failing condition should not block a destroy plan
condition = test_object.a[0].test_string == "invalid"
error_message = "should not block destroy"
}
}
`})
p := simpleMockProvider()
// This state could be the result of a failed destroy, leaving only 2
// remaining instances. We want to be able to continue the destroy to
// remove everything without blocking on invalid references or failing
// conditions.
state := states.NewState()
mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey))
mod.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.a[0]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"test_string":"current"}`),
Dependencies: []addrs.ConfigResource{},
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
)
mod.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.a[1]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"test_string":"current"}`),
Dependencies: []addrs.ConfigResource{},
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
)
ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})
_, diags := ctx.Plan(m, state, &PlanOpts{
Mode: plans.DestroyMode,
})
assertNoErrors(t, diags)
}

@ -49,6 +49,11 @@ type PlanGraphBuilder struct {
// skipRefresh indicates that we should skip refreshing managed resources
skipRefresh bool
// preDestroyRefresh indicates that we are executing the refresh which
// happens immediately before a destroy plan, which happens to use the
// normal planning mode, so skipPlanChanges cannot be set.
preDestroyRefresh bool
// skipPlanChanges indicates that we should skip the step of comparing
// prior state with configuration and generating planned changes to
// resource instances. (This is for the "refresh only" planning mode,
@ -111,7 +116,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
&LocalTransformer{Config: b.Config},
&OutputTransformer{
Config: b.Config,
RefreshOnly: b.skipPlanChanges,
RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh,
PlanDestroy: b.Operation == walkPlanDestroy,
// NOTE: We currently treat anything built with the plan graph
@ -214,6 +219,7 @@ func (b *PlanGraphBuilder) initPlan() {
NodeAbstractResource: a,
skipRefresh: b.skipRefresh,
skipPlanChanges: b.skipPlanChanges,
preDestroyRefresh: b.preDestroyRefresh,
forceReplace: b.ForceReplace,
}
}

@ -37,6 +37,8 @@ type NodeAbstractResourceInstance struct {
storedProviderConfig addrs.AbsProviderConfig
Dependencies []addrs.ConfigResource
preDestroyRefresh bool
}
// NewNodeAbstractResourceInstance creates an abstract resource instance graph
@ -682,6 +684,11 @@ func (n *NodeAbstractResourceInstance) plan(
return plan, state, keyData, diags.Append(err)
}
checkRuleSeverity := tfdiags.Error
if n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}
if plannedChange != nil {
// If we already planned the action, we stick to that plan
createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete
@ -708,7 +715,7 @@ func (n *NodeAbstractResourceInstance) plan(
addrs.ResourcePrecondition,
n.Config.Preconditions,
ctx, n.Addr, keyData,
tfdiags.Error,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
if diags.HasErrors() {

@ -25,6 +25,8 @@ type nodeExpandPlannableResource struct {
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool
preDestroyRefresh bool
// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
@ -328,6 +330,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
a.ProviderMetas = n.ProviderMetas
a.dependsOn = n.dependsOn
a.Dependencies = n.dependencies
a.preDestroyRefresh = n.preDestroyRefresh
return &NodePlannableResourceInstance{
NodeAbstractResourceInstance: a,

@ -83,7 +83,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
}
checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges {
if n.skipPlanChanges || n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}
@ -128,6 +128,11 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
var change *plans.ResourceInstanceChange
var instanceRefreshState *states.ResourceInstanceObject
checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges || n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
@ -280,7 +285,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePostcondition,
n.Config.Postconditions,
ctx, n.ResourceInstanceAddr(), repeatData,
tfdiags.Error,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
} else {
@ -298,7 +303,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePrecondition,
n.Config.Preconditions,
ctx, addr, repeatData,
tfdiags.Warning,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
@ -321,7 +326,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePostcondition,
n.Config.Postconditions,
ctx, addr, repeatData,
tfdiags.Warning,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
}
