stacks: track deferrals on top-level graph methods

This makes it easier to identify when deferrals happen in the workflow.
TF-13961
Liam Cervante 2 years ago committed by Daniel Schmidt
parent aed6f7586c
commit b3d653afb6
No known key found for this signature in database
GPG Key ID: 377C3A4D62FBBBE2

@ -1581,7 +1581,7 @@ output "a" {
}),
},
wantDeferred: map[string]ExpectedDeferred{
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Read},
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Update},
},
complete: false,
},
@ -2057,7 +2057,7 @@ import {
},
wantActions: make(map[string]plans.Action),
wantDeferred: map[string]ExpectedDeferred{
"test.a": {Reason: providers.DeferredReasonAbsentPrereq, Action: plans.NoOp},
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Create},
},
wantApplied: make(map[string]cty.Value),
wantOutputs: make(map[string]cty.Value),

@ -756,7 +756,7 @@ func (n *NodeAbstractResourceInstance) plan(
currentState *states.ResourceInstanceObject,
createBeforeDestroy bool,
forceReplace []addrs.AbsResourceInstance,
) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, *providers.Deferred, instances.RepetitionData, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
var keyData instances.RepetitionData
var deferred *providers.Deferred
@ -764,14 +764,14 @@ func (n *NodeAbstractResourceInstance) plan(
resource := n.Addr.Resource.Resource
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, nil, keyData, diags.Append(err)
return nil, nil, deferred, keyData, diags.Append(err)
}
schema, _ := providerSchema.SchemaForResourceAddr(resource)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// If we're importing and generating config, generate it now.
@ -785,7 +785,7 @@ func (n *NodeAbstractResourceInstance) plan(
tfdiags.Error,
"Resource has no configuration",
fmt.Sprintf("Terraform attempted to process a resource at %s that has no configuration. This is a bug in Terraform; please report it!", n.Addr.String())))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
config := *n.Config
@ -813,26 +813,26 @@ func (n *NodeAbstractResourceInstance) plan(
)
diags = diags.Append(checkDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags // failed preconditions prevent further evaluation
return nil, nil, deferred, keyData, diags // failed preconditions prevent further evaluation
}
// If we have a previous plan and the action was a noop, then the only
// reason we're in this method was to evaluate the preconditions. There's
// no need to re-plan this resource.
if plannedChange != nil && plannedChange.Action == plans.NoOp {
return plannedChange, currentState.DeepCopy(), keyData, diags
return plannedChange, currentState.DeepCopy(), deferred, keyData, diags
}
origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
metaConfigVal, metaDiags := n.providerMetas(ctx)
diags = diags.Append(metaDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
var priorVal cty.Value
@ -875,7 +875,7 @@ func (n *NodeAbstractResourceInstance) plan(
)
diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// ignore_changes is meant to only apply to the configuration, so it must
@ -888,7 +888,7 @@ func (n *NodeAbstractResourceInstance) plan(
configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal, schema)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// Create an unmarked version of our config val and our prior val.
@ -904,7 +904,7 @@ func (n *NodeAbstractResourceInstance) plan(
return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, priorVal, proposedNewVal)
}))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
var resp providers.PlanResourceChangeResponse
@ -942,7 +942,7 @@ func (n *NodeAbstractResourceInstance) plan(
}
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// We mark this node as deferred at a later point when we know the complete change
@ -979,7 +979,7 @@ func (n *NodeAbstractResourceInstance) plan(
}
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, unmarkedConfigVal, plannedNewVal); len(errs) > 0 {
@ -1009,7 +1009,7 @@ func (n *NodeAbstractResourceInstance) plan(
),
))
}
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
}
@ -1030,7 +1030,7 @@ func (n *NodeAbstractResourceInstance) plan(
plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal, nil)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
@ -1047,7 +1047,7 @@ func (n *NodeAbstractResourceInstance) plan(
reqRep, reqRepDiags := getRequiredReplaces(priorVal, plannedNewVal, resp.RequiresReplace, n.ResolvedProvider.Provider, n.Addr)
diags = diags.Append(reqRepDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
action, actionReason := getAction(n.Addr, unmarkedPriorVal, unmarkedPlannedNewVal, createBeforeDestroy, forceReplace, reqRep)
@ -1104,10 +1104,10 @@ func (n *NodeAbstractResourceInstance) plan(
// append these new diagnostics if there's at least one error inside.
if resp.Diagnostics.HasErrors() {
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
if resp.Deferred != nil {
if deferred == nil && resp.Deferred != nil {
deferred = resp.Deferred
}
@ -1129,7 +1129,7 @@ func (n *NodeAbstractResourceInstance) plan(
))
}
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
@ -1178,7 +1178,7 @@ func (n *NodeAbstractResourceInstance) plan(
return h.PostDiff(n.HookResourceIdentity(), addrs.NotDeposed, action, priorVal, plannedNewVal)
}))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// Update our return plan
@ -1200,12 +1200,6 @@ func (n *NodeAbstractResourceInstance) plan(
RequiredReplace: reqRep,
}
// If we defer the change we need to report it and return early
if deferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, deferred.Reason, plan)
return nil, nil, keyData, diags
}
// Update our return state
state := &states.ResourceInstanceObject{
// We use the special "planned" status here to note that this
@ -1219,7 +1213,7 @@ func (n *NodeAbstractResourceInstance) plan(
Private: plannedPrivate,
}
return plan, state, keyData, diags
return plan, state, deferred, keyData, diags
}
func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {

@ -275,12 +275,23 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
// Make a new diff, in case we've learned new values in the state
// during apply which we can now incorporate.
diffApply, _, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace)
diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
if deferred != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Resource deferred during apply, but not during plan",
fmt.Sprintf(
"Terraform has encountered a bug where a provider would mark the resource %q as deferred during apply, but not during plan. This is most likely a bug in the provider. Please file an issue with the provider.", n.Addr,
),
))
return diags
}
// Compare the diffs
diags = diags.Append(n.checkPlannedChange(ctx, diff, diffApply, providerSchema))
if diags.HasErrors() {

@ -245,6 +245,7 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di
fmt.Sprintf(
"While attempting to import an existing object to %q, "+
"the provider deferred reading the resource. "+
"This is a bug in the provider since deferrals are not supported when importing through the CLI, please file an issue."+
"Please either use an import block for importing this resource "+
"or remove the to be imported resource from your configuration, "+
"apply the configuration using \"terraform apply\", "+

@ -163,10 +163,12 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
importing := n.importTarget.IDString != ""
importId := n.importTarget.IDString
var deferred *providers.Deferred
// If the resource is to be imported, we now ask the provider for an Import
// and a Refresh, and save the resulting state to instanceRefreshState.
if importing {
instanceRefreshState, diags = n.importState(ctx, addr, importId, provider, providerSchema)
instanceRefreshState, deferred, diags = n.importState(ctx, addr, importId, provider, providerSchema)
} else {
var readDiags tfdiags.Diagnostics
instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr)
@ -176,20 +178,22 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
if deferred == nil {
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
// In 0.13 we could be refreshing a resource with no config.
@ -202,28 +206,21 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
var refreshDeferred *providers.Deferred
// This is the state of the resource before we refresh the value, we need to keep track
// of this to report this as the before value if the refresh is deferred.
priorInstanceRefreshState := instanceRefreshState
// Refresh, maybe
// The import process handles its own refresh
if !n.skipRefresh && !importing {
s, deferred, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState, ctx.Deferrals().DeferralAllowed())
var refreshDiags tfdiags.Diagnostics
instanceRefreshState, refreshDeferred, refreshDiags = n.refresh(ctx, states.NotDeposed, instanceRefreshState, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}
if deferred == nil {
instanceRefreshState = s
} else {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
Before: s.Value,
After: cty.DynamicVal,
},
})
}
if instanceRefreshState != nil {
// When refreshing we start by merging the stored dependencies and
// the configured dependencies. The configured dependencies will be
@ -233,7 +230,13 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
}
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if deferred == nil && refreshDeferred != nil {
deferred = refreshDeferred
}
if deferred == nil {
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
}
if diags.HasErrors() {
return diags
}
@ -258,7 +261,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
change, instancePlanState, repeatData, planDiags := n.plan(
change, instancePlanState, planDeferred, repeatData, planDiags := n.plan(
ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
)
diags = diags.Append(planDiags)
@ -266,7 +269,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
// If we are importing and generating a configuration, we need to
// ensure the change is written out so the configuration can be
// captured.
if len(n.generateConfigPath) > 0 {
if planDeferred == nil && len(n.generateConfigPath) > 0 {
// Update our return plan
change := &plans.ResourceInstanceChange{
Addr: n.Addr,
@ -286,6 +289,10 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
if deferred == nil && planDeferred != nil {
deferred = planDeferred
}
if importing {
change.Importing = &plans.Importing{ID: importId}
}
@ -298,10 +305,11 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
deferrals := ctx.Deferrals()
if deferrals.IsResourceInstanceDeferred(n.Addr) {
// This resource instance is already deferred, probably because it
// was deferred during the refresh or import step.
if deferred != nil {
// Then this resource has been deferred either during the import,
// refresh or planning stage. We'll report the deferral and
// store what we could produce in the deferral tracker.
deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change)
} else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr) {
// We intentionally write the change before the subsequent checks, because
// all of the checks below this point are for problems caused by the
@ -417,6 +425,19 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
// In this case we skipped planning changes and therefore need to report the deferral
// here, if there was one.
if refreshDeferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(addr, deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
Before: priorInstanceRefreshState.Value,
After: instanceRefreshState.Value,
},
})
}
}
return diags
@ -455,7 +476,7 @@ func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repDat
return diags
}
func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
absAddr := addr.Resource.Absolute(ctx.Path())
hookResourceID := HookResourceIdentity{
@ -463,18 +484,20 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
ProviderAddr: n.ResolvedProvider.Provider,
}
var deferred *providers.Deferred
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PrePlanImport(hookResourceID, importId)
}))
if diags.HasErrors() {
return nil, diags
return nil, deferred, diags
}
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.Resource)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", n.Addr))
return nil, diags
return nil, deferred, diags
}
var resp providers.ImportResourceStateResponse
@ -493,7 +516,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
// document the expectation somewhere. This shouldn't happen in
// production, so we don't bother with a pretty error.
diags = diags.Append(fmt.Errorf("override blocks do not support config generation"))
return nil, diags
return nil, deferred, diags
}
forEach, _, _ := evaluateForEachExpression(n.Config.ForEach, ctx, false)
@ -511,7 +534,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
// later), so only add the configDiags into the main diags if we
// found actual errors.
diags = diags.Append(configDiags)
return nil, diags
return nil, deferred, diags
}
configVal, _ = configVal.UnmarkDeep()
@ -540,15 +563,29 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
})
}
diags = diags.Append(resp.Diagnostics)
deferred = resp.Deferred
if diags.HasErrors() {
return nil, diags
return nil, deferred, diags
}
imported := resp.ImportedResources
var importedState *states.ResourceInstanceObject
if len(imported) > 1 {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Multiple import states not supported",
fmt.Sprintf("While attempting to import with ID %s, the provider "+
"returned multiple resource instance states. This "+
"is not currently supported.",
importId,
),
))
}
if len(imported) == 0 {
if resp.Deferred == nil {
// Sanity check against the provider. If the provider defers the response, it may not have been able to return a state, so we'll only error if no deferral was returned.
if deferred == nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned no resources",
@ -557,46 +594,26 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
importId,
),
))
return nil, diags
} else {
importedState = &states.ResourceInstanceObject{
Value: cty.NullVal(schema.ImpliedType()),
}
return nil, deferred, diags
}
} else {
importedState = imported[0].AsInstanceObject()
// If we were deferred, then let's make up a resource to represent the
// state we're going to import.
state := providers.ImportedResource{
TypeName: addr.Resource.Resource.Type,
State: cty.NullVal(schema.ImpliedType()),
}
// We skip the read and further validation since we make up the state
// of the imported resource anyways.
return state.AsInstanceObject(), deferred, diags
}
for _, obj := range imported {
log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), importId, obj.TypeName)
}
if len(imported) > 1 {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Multiple import states not supported",
fmt.Sprintf("While attempting to import with ID %s, the provider "+
"returned multiple resource instance states. This "+
"is not currently supported.",
importId,
),
))
}
// If the import was deferred we can't do more here
if resp.Deferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, resp.Deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.NoOp,
Before: cty.UnknownVal(cty.DynamicPseudoType),
After: cty.UnknownVal(cty.DynamicPseudoType),
Importing: &plans.Importing{
ID: importId,
},
},
})
return nil, diags
}
importedState := imported[0].AsInstanceObject()
// We can only call the hooks and validate the imported state if we have
// actually done the import.
@ -605,22 +622,24 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostPlanImport(hookResourceID, imported)
}))
}
if imported[0].TypeName == "" {
diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String()))
return nil, diags
}
if imported[0].TypeName == "" {
diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String()))
return nil, deferred, diags
}
if deferred == nil && importedState.Value.IsNull() {
// It's actually okay for a deferred import to have returned a null.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned null resource",
fmt.Sprintf("While attempting to import with ID %s, the provider"+
"returned an instance with no state.",
n.importTarget.IDString,
),
))
if importedState.Value.IsNull() {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned null resource",
fmt.Sprintf("While attempting to import with ID %s, the provider"+
"returned an instance with no state.",
n.importTarget.IDString,
),
))
}
}
// refresh
@ -634,22 +653,16 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
instanceRefreshState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, deferred, diags
}
// report the refresh was deferred, we don't need to error since the import step succeeded
if refreshDeferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, refreshDeferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
After: instanceRefreshState.Value,
},
})
if deferred == nil && refreshDeferred != nil {
deferred = refreshDeferred
}
// verify the existence of the imported resource
if instanceRefreshState.Value.IsNull() && refreshDeferred == nil {
if refreshDeferred == nil && instanceRefreshState.Value.IsNull() {
var diags tfdiags.Diagnostics
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
@ -664,13 +677,15 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
n.Addr,
),
))
return instanceRefreshState, diags
return instanceRefreshState, deferred, diags
}
// If we're importing and generating config, generate it now.
if len(n.generateConfigPath) > 0 {
// If we're importing and generating config, generate it now. We only
// generate config if the import isn't being deferred. We should generate
// the configuration in the plan that the import is actually happening in.
if deferred == nil && len(n.generateConfigPath) > 0 {
if n.Config != nil {
return instanceRefreshState, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr))
return instanceRefreshState, nil, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr))
}
// Generate the HCL string first, then parse the HCL body from it.
@ -686,7 +701,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
synthHCLFile, hclDiags := hclsyntax.ParseConfig([]byte(generatedHCLAttributes), filepath.Base(n.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1})
diags = diags.Append(hclDiags)
if hclDiags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, nil, diags
}
// We have to do a kind of mini parsing of the content here to correctly
@ -696,7 +711,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
_, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema)
diags = diags.Append(resourceDiags)
if resourceDiags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, nil, diags
}
n.Config = &configs.Resource{
@ -709,8 +724,13 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
}
}
diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
return instanceRefreshState, diags
if deferred == nil {
// Only write the state if the change isn't being deferred. We're also
// reporting the deferred status to the caller, so they should know
// not to read from the state.
diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
}
return instanceRefreshState, deferred, diags
}
// generateHCLStringAttributes produces a string in HCL format for the given

Loading…
Cancel
Save