Merge pull request #35001 from hashicorp/TF-13961-ensure-all-deferred-provider-workflows-have-same-behavior

stacks: handle deferred import

Daniel Schmidt authored 2 years ago, committed by GitHub
commit 3a1c70d4fa

@@ -622,6 +622,9 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
protoReq := &proto.ImportResourceState_Request{
TypeName: r.TypeName,
Id: r.ID,
ClientCapabilities: &proto.ClientCapabilities{
DeferralAllowed: r.ClientCapabilities.DeferralAllowed,
},
}
protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
@@ -630,6 +633,7 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
resp.Deferred = convert.ProtoToDeferred(protoResp.Deferred)
for _, imported := range protoResp.ImportedResources {
resource := providers.ImportedResource{

@@ -611,6 +611,9 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
protoReq := &proto6.ImportResourceState_Request{
TypeName: r.TypeName,
Id: r.ID,
ClientCapabilities: &proto6.ClientCapabilities{
DeferralAllowed: r.ClientCapabilities.DeferralAllowed,
},
}
protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
@@ -619,6 +622,7 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
resp.Deferred = convert.ProtoToDeferred(protoResp.Deferred)
for _, imported := range protoResp.ImportedResources {
resource := providers.ImportedResource{

@@ -425,6 +425,9 @@ type ImportResourceStateRequest struct {
// ID is a string with which the provider can identify the resource to be
// imported.
ID string
// ClientCapabilities contains information about the client's capabilities.
ClientCapabilities ClientCapabilities
}
type ImportResourceStateResponse struct {
@@ -436,6 +439,10 @@ type ImportResourceStateResponse struct {
// Diagnostics contains any warnings or errors from the method call.
Diagnostics tfdiags.Diagnostics
// Deferred if present signals that the provider was not able to fully
// complete this operation and a subsequent run is required.
Deferred *Deferred
}
// ImportedResource represents an object being imported into Terraform with the

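Taken together, these two new fields define the import contract in this PR: the caller advertises that it can handle deferral via ClientCapabilities, and must check Deferred on the response before trusting the imported state. A minimal caller-side sketch, illustration only and not part of this diff (the internal package path is an assumption):

package example

import (
	"log"

	"github.com/hashicorp/terraform/internal/providers"
)

// importWithDeferral shows the request/response contract added in this PR:
// opt in to deferral via ClientCapabilities, then branch on resp.Deferred.
func importWithDeferral(p providers.Interface, typeName, id string) {
	resp := p.ImportResourceState(providers.ImportResourceStateRequest{
		TypeName: typeName,
		ID:       id,
		ClientCapabilities: providers.ClientCapabilities{
			DeferralAllowed: true, // this caller can cope with a deferred import
		},
	})
	if resp.Deferred != nil {
		// The provider could not fully complete the import; a subsequent
		// run is required, for the reason the provider reported.
		log.Printf("import of %s deferred: reason %v", id, resp.Deferred.Reason)
		return
	}
	for _, imported := range resp.ImportedResources {
		log.Printf("imported %s as instance object of type %s", id, imported.TypeName)
	}
}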
@@ -1581,7 +1581,7 @@ output "a" {
}),
},
wantDeferred: map[string]ExpectedDeferred{
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Read},
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Update},
},
complete: false,
},
@@ -2025,6 +2025,63 @@ output "a" {
},
},
}
importDeferredTest = deferredActionsTest{
configs: map[string]string{
"main.tf": `
variable "import_id" {
type = string
}
resource "test" "a" {
name = "a"
}
import {
id = var.import_id
to = test.a
}
`,
},
stages: []deferredActionsTestStage{
{
inputs: map[string]cty.Value{
"import_id": cty.StringVal("deferred"), // Telling the test case to defer the import
},
wantPlanned: map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("a"),
"upstream_names": cty.NullVal(cty.Set(cty.String)),
"output": cty.UnknownVal(cty.String),
}),
},
wantActions: make(map[string]plans.Action),
wantDeferred: map[string]ExpectedDeferred{
"test.a": {Reason: providers.DeferredReasonProviderConfigUnknown, Action: plans.Create},
},
wantApplied: make(map[string]cty.Value),
wantOutputs: make(map[string]cty.Value),
complete: false,
},
{
inputs: map[string]cty.Value{
"import_id": cty.StringVal("can_be_imported"),
},
wantPlanned: map[string]cty.Value{
"a": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("a"),
"upstream_names": cty.NullVal(cty.Set(cty.String)),
"output": cty.StringVal("can_be_imported"),
}),
},
wantActions: map[string]plans.Action{
"test.a": plans.Update,
},
wantDeferred: map[string]ExpectedDeferred{},
complete: true,
},
},
}
)
func TestContextApply_deferredActions(t *testing.T) {
@@ -2050,6 +2107,7 @@ func TestContextApply_deferredActions(t *testing.T) {
"plan_force_replace_resource_change": planForceReplaceResourceChange,
"plan_delete_resource_change": planDeleteResourceChange,
"plan_destroy_resource_change": planDestroyResourceChange,
"import_deferred": importDeferredTest,
}
for name, test := range tests {
@@ -2345,6 +2403,15 @@ func (provider *deferredActionsProvider) Provider() providers.Interface {
}
},
ImportResourceStateFn: func(request providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
if request.ID == "deferred" {
return providers.ImportResourceStateResponse{
ImportedResources: []providers.ImportedResource{},
Deferred: &providers.Deferred{
Reason: providers.DeferredReasonProviderConfigUnknown,
},
}
}
return providers.ImportResourceStateResponse{
ImportedResources: []providers.ImportedResource{
{

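The test stub above defers unconditionally because the harness always allows deferral. A real provider would normally guard the deferral behind the client capability this PR plumbs through. A hedged sketch of that guarded pattern, where configIsKnown is a hypothetical placeholder and not part of this PR:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform/internal/providers"
)

// guardedImport defers only when the client allows it; otherwise it returns
// a hard error, since the caller cannot retry on a subsequent run.
func guardedImport(req providers.ImportResourceStateRequest, configIsKnown bool) providers.ImportResourceStateResponse {
	var resp providers.ImportResourceStateResponse
	if !configIsKnown {
		if req.ClientCapabilities.DeferralAllowed {
			// Signal deferral instead of returning state; the caller will
			// retry the import in a later run.
			resp.Deferred = &providers.Deferred{
				Reason: providers.DeferredReasonProviderConfigUnknown,
			}
			return resp
		}
		// The client cannot handle deferral, so fail outright.
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("cannot import %q: provider configuration is not fully known", req.ID))
		return resp
	}
	// Normal path: return the imported object; a real provider would also
	// populate State with the object's attributes.
	resp.ImportedResources = []providers.ImportedResource{
		{TypeName: req.TypeName},
	}
	return resp
}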
@@ -592,7 +592,7 @@ func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plan
// refresh does a refresh for a resource
// if the second return value is non-nil, the refresh is deferred
func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) {
func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject, deferralAllowed bool) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
var deferred *providers.Deferred
absAddr := n.Addr
@@ -653,7 +653,7 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey state
Private: state.Private,
ProviderMeta: metaConfigVal,
ClientCapabilities: providers.ClientCapabilities{
DeferralAllowed: ctx.Deferrals().DeferralAllowed(),
DeferralAllowed: deferralAllowed,
},
})
@@ -756,7 +756,7 @@ func (n *NodeAbstractResourceInstance) plan(
currentState *states.ResourceInstanceObject,
createBeforeDestroy bool,
forceReplace []addrs.AbsResourceInstance,
) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, *providers.Deferred, instances.RepetitionData, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
var keyData instances.RepetitionData
var deferred *providers.Deferred
@@ -764,14 +764,14 @@ func (n *NodeAbstractResourceInstance) plan(
resource := n.Addr.Resource.Resource
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, nil, keyData, diags.Append(err)
return nil, nil, deferred, keyData, diags.Append(err)
}
schema, _ := providerSchema.SchemaForResourceAddr(resource)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// If we're importing and generating config, generate it now.
@@ -785,7 +785,7 @@ func (n *NodeAbstractResourceInstance) plan(
tfdiags.Error,
"Resource has no configuration",
fmt.Sprintf("Terraform attempted to process a resource at %s that has no configuration. This is a bug in Terraform; please report it!", n.Addr.String())))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
config := *n.Config
@@ -813,26 +813,26 @@ func (n *NodeAbstractResourceInstance) plan(
)
diags = diags.Append(checkDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags // failed preconditions prevent further evaluation
return nil, nil, deferred, keyData, diags // failed preconditions prevent further evaluation
}
// If we have a previous plan and the action was a noop, then the only
// reason we're in this method was to evaluate the preconditions. There's
// no need to re-plan this resource.
if plannedChange != nil && plannedChange.Action == plans.NoOp {
return plannedChange, currentState.DeepCopy(), keyData, diags
return plannedChange, currentState.DeepCopy(), deferred, keyData, diags
}
origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
metaConfigVal, metaDiags := n.providerMetas(ctx)
diags = diags.Append(metaDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
var priorVal cty.Value
@@ -875,7 +875,7 @@ func (n *NodeAbstractResourceInstance) plan(
)
diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// ignore_changes is meant to only apply to the configuration, so it must
@@ -888,7 +888,7 @@ func (n *NodeAbstractResourceInstance) plan(
configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal, schema)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// Create an unmarked version of our config val and our prior val.
@@ -904,7 +904,7 @@ func (n *NodeAbstractResourceInstance) plan(
return h.PreDiff(n.HookResourceIdentity(), addrs.NotDeposed, priorVal, proposedNewVal)
}))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
var resp providers.PlanResourceChangeResponse
@@ -942,7 +942,7 @@ func (n *NodeAbstractResourceInstance) plan(
}
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// We mark this node as deferred at a later point when we know the complete change
@@ -979,7 +979,7 @@ func (n *NodeAbstractResourceInstance) plan(
}
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, unmarkedConfigVal, plannedNewVal); len(errs) > 0 {
@@ -1009,7 +1009,7 @@ func (n *NodeAbstractResourceInstance) plan(
),
))
}
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
}
@@ -1030,7 +1030,7 @@ func (n *NodeAbstractResourceInstance) plan(
plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal, nil)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
@@ -1047,7 +1047,7 @@ func (n *NodeAbstractResourceInstance) plan(
reqRep, reqRepDiags := getRequiredReplaces(priorVal, plannedNewVal, resp.RequiresReplace, n.ResolvedProvider.Provider, n.Addr)
diags = diags.Append(reqRepDiags)
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
action, actionReason := getAction(n.Addr, unmarkedPriorVal, unmarkedPlannedNewVal, createBeforeDestroy, forceReplace, reqRep)
@@ -1104,10 +1104,10 @@ func (n *NodeAbstractResourceInstance) plan(
// append these new diagnostics if there's at least one error inside.
if resp.Diagnostics.HasErrors() {
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
if resp.Deferred != nil {
if deferred == nil && resp.Deferred != nil {
deferred = resp.Deferred
}
@@ -1129,7 +1129,7 @@ func (n *NodeAbstractResourceInstance) plan(
))
}
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
}
@@ -1178,7 +1178,7 @@ func (n *NodeAbstractResourceInstance) plan(
return h.PostDiff(n.HookResourceIdentity(), addrs.NotDeposed, action, priorVal, plannedNewVal)
}))
if diags.HasErrors() {
return nil, nil, keyData, diags
return nil, nil, deferred, keyData, diags
}
// Update our return plan
@@ -1200,12 +1200,6 @@ func (n *NodeAbstractResourceInstance) plan(
RequiredReplace: reqRep,
}
// If we defer the change we need to report it and return early
if deferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, deferred.Reason, plan)
return nil, nil, keyData, diags
}
// Update our return state
state := &states.ResourceInstanceObject{
// We use the special "planned" status here to note that this
@@ -1219,7 +1213,7 @@ func (n *NodeAbstractResourceInstance) plan(
Private: plannedPrivate,
}
return plan, state, keyData, diags
return plan, state, deferred, keyData, diags
}
func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {

@@ -232,7 +232,7 @@ func TestNodeAbstractResourceInstance_refresh_with_deferred_read(t *testing.T) {
resourceGraph := addrs.NewDirectedGraph[addrs.ConfigResource]()
evalCtx.DeferralsState = deferring.NewDeferred(resourceGraph, true)
rio, deferred, diags := node.refresh(evalCtx, states.NotDeposed, obj)
rio, deferred, diags := node.refresh(evalCtx, states.NotDeposed, obj, true)
if diags.HasErrors() {
t.Fatal(diags.Err())
}

@@ -275,12 +275,23 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
// Make a new diff, in case we've learned new values in the state
// during apply which we can now incorporate.
diffApply, _, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace)
diffApply, _, deferred, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
if deferred != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Resource deferred during apply, but not during plan",
fmt.Sprintf(
"Terraform has encountered a bug where a provider would mark the resource %q as deferred during apply, but not during plan. This is most likely a bug in the provider. Please file an issue with the provider.", n.Addr,
),
))
return diags
}
// Compare the diffs
diags = diags.Append(n.checkPlannedChange(ctx, diff, diffApply, providerSchema))
if diags.HasErrors() {

@@ -121,7 +121,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk
// resource during Delete correctly. If this is a simple refresh,
// Terraform is expected to remove the missing resource from the state
// entirely
refreshedState, deferred, refreshDiags := n.refresh(ctx, n.DeposedKey, state)
refreshedState, deferred, refreshDiags := n.refresh(ctx, n.DeposedKey, state, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags

@@ -97,6 +97,9 @@ func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags
resp := provider.ImportResourceState(providers.ImportResourceStateRequest{
TypeName: n.Addr.Resource.Resource.Type,
ID: n.ID,
ClientCapabilities: providers.ClientCapabilities{
DeferralAllowed: false,
},
})
diags = diags.Append(resp.Diagnostics)
if diags.HasErrors() {
@@ -228,7 +231,7 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di
ResolvedProvider: n.ResolvedProvider,
},
}
state, deferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state)
state, deferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state, false)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
@@ -242,6 +245,7 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di
fmt.Sprintf(
"While attempting to import an existing object to %q, "+
"the provider deferred reading the resource. "+
"This is a bug in the provider since deferrals are not supported when importing through the CLI, please file an issue."+
"Please either use an import block for importing this resource "+
"or remove the to be imported resource from your configuration, "+
"apply the configuration using \"terraform apply\", "+

@@ -163,10 +163,12 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
importing := n.importTarget.IDString != ""
importId := n.importTarget.IDString
var deferred *providers.Deferred
// If the resource is to be imported, we now ask the provider for an Import
// and a Refresh, and save the resulting state to instanceRefreshState.
if importing {
instanceRefreshState, diags = n.importState(ctx, addr, importId, provider, providerSchema)
instanceRefreshState, deferred, diags = n.importState(ctx, addr, importId, provider, providerSchema)
} else {
var readDiags tfdiags.Diagnostics
instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr)
@@ -176,20 +178,22 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
if deferred == nil {
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
// In 0.13 we could be refreshing a resource with no config.
@@ -202,28 +206,21 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
var refreshDeferred *providers.Deferred
// This is the state of the resource before we refresh the value; we need to
// keep track of it so we can report it as the before value if the refresh is deferred.
priorInstanceRefreshState := instanceRefreshState
// Refresh, maybe
// The import process handles its own refresh
if !n.skipRefresh && !importing {
s, deferred, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
var refreshDiags tfdiags.Diagnostics
instanceRefreshState, refreshDeferred, refreshDiags = n.refresh(ctx, states.NotDeposed, instanceRefreshState, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}
if deferred == nil {
instanceRefreshState = s
} else {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
Before: s.Value,
After: cty.DynamicVal,
},
})
}
if instanceRefreshState != nil {
// When refreshing we start by merging the stored dependencies and
// the configured dependencies. The configured dependencies will be
@@ -233,7 +230,13 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
}
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if deferred == nil && refreshDeferred != nil {
deferred = refreshDeferred
}
if deferred == nil {
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
}
if diags.HasErrors() {
return diags
}
@@ -258,7 +261,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
change, instancePlanState, repeatData, planDiags := n.plan(
change, instancePlanState, planDeferred, repeatData, planDiags := n.plan(
ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
)
diags = diags.Append(planDiags)
@@ -266,7 +269,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
// If we are importing and generating a configuration, we need to
// ensure the change is written out so the configuration can be
// captured.
if len(n.generateConfigPath) > 0 {
if planDeferred == nil && len(n.generateConfigPath) > 0 {
// Update our return plan
change := &plans.ResourceInstanceChange{
Addr: n.Addr,
@@ -286,6 +289,10 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
if deferred == nil && planDeferred != nil {
deferred = planDeferred
}
if importing {
change.Importing = &plans.Importing{ID: importId}
}
@@ -298,10 +305,11 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
}
deferrals := ctx.Deferrals()
if deferrals.IsResourceInstanceDeferred(n.Addr) {
// This resource instance is already deferred, probably because it
// was deferred during the refresh or import step.
if deferred != nil {
// Then this resource has been deferred either during the import,
// refresh or planning stage. We'll report the deferral and
// store what we could produce in the deferral tracker.
deferrals.ReportResourceInstanceDeferred(addr, deferred.Reason, change)
} else if !deferrals.ShouldDeferResourceInstanceChanges(n.Addr) {
// We intentionally write the change before the subsequent checks, because
// all of the checks below this point are for problems caused by the
@@ -417,6 +425,19 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
// In this case we skipped planning changes and therefore need to report the deferral
// here, if there was one.
if refreshDeferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(addr, deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
Before: priorInstanceRefreshState.Value,
After: instanceRefreshState.Value,
},
})
}
}
return diags
@@ -455,7 +476,7 @@ func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repDat
return diags
}
func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, *providers.Deferred, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
absAddr := addr.Resource.Absolute(ctx.Path())
hookResourceID := HookResourceIdentity{
@@ -463,18 +484,20 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
ProviderAddr: n.ResolvedProvider.Provider,
}
var deferred *providers.Deferred
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PrePlanImport(hookResourceID, importId)
}))
if diags.HasErrors() {
return nil, diags
return nil, deferred, diags
}
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.Resource)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", n.Addr))
return nil, diags
return nil, deferred, diags
}
var resp providers.ImportResourceStateResponse
@@ -493,7 +516,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
// document the expectation somewhere. This shouldn't happen in
// production, so we don't bother with a pretty error.
diags = diags.Append(fmt.Errorf("override blocks do not support config generation"))
return nil, diags
return nil, deferred, diags
}
forEach, _, _ := evaluateForEachExpression(n.Config.ForEach, ctx, false)
@@ -511,7 +534,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
// later), so only add the configDiags into the main diags if we
// found actual errors.
diags = diags.Append(configDiags)
return nil, diags
return nil, deferred, diags
}
configVal, _ = configVal.UnmarkDeep()
@@ -534,29 +557,19 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
resp = provider.ImportResourceState(providers.ImportResourceStateRequest{
TypeName: addr.Resource.Resource.Type,
ID: importId,
ClientCapabilities: providers.ClientCapabilities{
DeferralAllowed: ctx.Deferrals().DeferralAllowed(),
},
})
}
diags = diags.Append(resp.Diagnostics)
deferred = resp.Deferred
if diags.HasErrors() {
return nil, diags
return nil, deferred, diags
}
imported := resp.ImportedResources
if len(imported) == 0 {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned no resources",
fmt.Sprintf("While attempting to import with ID %s, the provider"+
"returned no instance states.",
importId,
),
))
return nil, diags
}
for _, obj := range imported {
log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), importId, obj.TypeName)
}
if len(imported) > 1 {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
@@ -567,22 +580,57 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
importId,
),
))
return nil, diags
}
// call post-import hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostPlanImport(hookResourceID, imported)
}))
if len(imported) == 0 {
if imported[0].TypeName == "" {
diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String()))
return nil, diags
// Sanity check against the provider. If the provider deferred the response,
// it may not have been able to return a state, so we only error if no deferral was returned.
if deferred == nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned no resources",
fmt.Sprintf("While attempting to import with ID %s, the provider"+
"returned no instance states.",
importId,
),
))
return nil, deferred, diags
}
// If we were deferred, then let's make up a resource to represent the
// state we're going to import.
state := providers.ImportedResource{
TypeName: addr.Resource.Resource.Type,
State: cty.NullVal(schema.ImpliedType()),
}
// We skip the read and further validation since we make up the state
// of the imported resource anyway.
return state.AsInstanceObject(), deferred, diags
}
for _, obj := range imported {
log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), importId, obj.TypeName)
}
importedState := imported[0].AsInstanceObject()
if importedState.Value.IsNull() {
// We can only call the hooks and validate the imported state if we have
// actually done the import.
if resp.Deferred == nil {
// call post-import hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostPlanImport(hookResourceID, imported)
}))
}
if imported[0].TypeName == "" {
diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String()))
return nil, deferred, diags
}
if deferred == nil && importedState.Value.IsNull() {
// It's actually okay for a deferred import to have returned a null.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Import returned null resource",
@@ -591,6 +639,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
n.importTarget.IDString,
),
))
}
// refresh
@@ -601,24 +650,19 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
},
override: n.override,
}
instanceRefreshState, deferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState)
instanceRefreshState, refreshDeferred, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, deferred, diags
}
if deferred != nil {
ctx.Deferrals().ReportResourceInstanceDeferred(n.Addr, deferred.Reason, &plans.ResourceInstanceChange{
Addr: n.Addr,
Change: plans.Change{
Action: plans.Read,
After: instanceRefreshState.Value,
},
})
// Report that the refresh was deferred; we don't need to error since the import step succeeded.
if deferred == nil && refreshDeferred != nil {
deferred = refreshDeferred
}
// verify the existence of the imported resource
if instanceRefreshState.Value.IsNull() && deferred == nil {
if refreshDeferred == nil && instanceRefreshState.Value.IsNull() {
var diags tfdiags.Diagnostics
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
@@ -633,13 +677,15 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
n.Addr,
),
))
return instanceRefreshState, diags
return instanceRefreshState, deferred, diags
}
// If we're importing and generating config, generate it now.
if len(n.generateConfigPath) > 0 {
// If we're importing and generating config, generate it now. We only
// generate config if the import isn't being deferred, so that the
// configuration is generated in the plan in which the import actually happens.
if deferred == nil && len(n.generateConfigPath) > 0 {
if n.Config != nil {
return instanceRefreshState, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr))
return instanceRefreshState, nil, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr))
}
// Generate the HCL string first, then parse the HCL body from it.
@@ -655,7 +701,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
synthHCLFile, hclDiags := hclsyntax.ParseConfig([]byte(generatedHCLAttributes), filepath.Base(n.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1})
diags = diags.Append(hclDiags)
if hclDiags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, nil, diags
}
// We have to do a kind of mini parsing of the content here to correctly
@@ -665,7 +711,7 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
_, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema)
diags = diags.Append(resourceDiags)
if resourceDiags.HasErrors() {
return instanceRefreshState, diags
return instanceRefreshState, nil, diags
}
n.Config = &configs.Resource{
@@ -678,8 +724,13 @@ func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.
}
}
diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
return instanceRefreshState, diags
if deferred == nil {
// Only write the state if the change isn't being deferred. We're also
// reporting the deferred status to the caller, so they should know
// not to read from the state.
diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
}
return instanceRefreshState, deferred, diags
}
// generateHCLStringAttributes produces a string in HCL format for the given

@@ -117,7 +117,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
// plan before apply, and may not handle a missing resource during
// Delete correctly. If this is a simple refresh, Terraform is
// expected to remove the missing resource from the state entirely
refreshedState, deferred, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState)
refreshedState, deferred, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState, ctx.Deferrals().DeferralAllowed())
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
