Merge branch 'master' into datasource_documentation

pull/10489/head
Megan Marsh committed a4a70e0bb8 via GitHub

@ -70,9 +70,6 @@ type Driver interface {
// DeleteOSLoginSSHKey deletes the SSH public key for OSLogin with the given key.
DeleteOSLoginSSHKey(user, fingerprint string) error
// If packer is running on a GCE, derives the user from it for use with OSLogin.
GetOSLoginUserFromGCE() string
// Add to the instance metadata for the existing instance
AddToInstanceMetadata(zone string, name string, metadata map[string]string) error
}

@ -10,11 +10,9 @@ import (
"errors"
"fmt"
"log"
"net/http"
"strings"
"time"
metadata "cloud.google.com/go/compute/metadata"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/option"
oslogin "google.golang.org/api/oslogin/v1"
@ -34,7 +32,6 @@ import (
type driverGCE struct {
projectId string
service *compute.Service
thisGCEUser string
osLoginService *oslogin.Service
ui packersdk.Ui
}
@ -136,8 +133,6 @@ func NewClientOptionGoogle(account *ServiceAccount, vaultOauth string, impersona
func NewDriverGCE(config GCEDriverConfig) (Driver, error) {
var thisGCEUser string
opts, err := NewClientOptionGoogle(config.Account, config.VaultOauthEngineName, config.ImpersonateServiceAccountName)
if err != nil {
return nil, err
@ -149,15 +144,6 @@ func NewDriverGCE(config GCEDriverConfig) (Driver, error) {
return nil, err
}
if metadata.OnGCE() {
log.Printf("[INFO] On GCE, capture service account for OSLogin...")
thisGCEUser, err = metadata.NewClient(&http.Client{}).Email("")
if err != nil {
return nil, err
}
}
log.Printf("[INFO] Instantiating OS Login client...")
osLoginService, err := oslogin.NewService(context.TODO(), opts)
if err != nil {
@ -170,7 +156,6 @@ func NewDriverGCE(config GCEDriverConfig) (Driver, error) {
return &driverGCE{
projectId: config.ProjectId,
service: service,
thisGCEUser: thisGCEUser,
osLoginService: osLoginService,
ui: config.Ui,
}, nil
@ -644,10 +629,6 @@ func (d *driverGCE) getPasswordResponses(zone, instance string) ([]windowsPasswo
return passwordResponses, nil
}
func (d *driverGCE) GetOSLoginUserFromGCE() string {
return d.thisGCEUser
}
func (d *driverGCE) ImportOSLoginSSHKey(user, sshPublicKey string) (*oslogin.LoginProfile, error) {
parent := fmt.Sprintf("users/%s", user)

@ -94,8 +94,6 @@ type DriverMock struct {
AddToInstanceMetadataKVPairs map[string]string
AddToInstanceMetadataErrCh <-chan error
AddToInstanceMetadataErr error
OSLoginUserFromGCE string
}
func (d *DriverMock) CreateImage(name, description, family, zone, disk string, image_labels map[string]string, image_licenses []string, image_encryption_key *compute.CustomerEncryptionKey, imageStorageLocations []string) (<-chan *Image, <-chan error) {
@ -310,7 +308,3 @@ func (d *DriverMock) AddToInstanceMetadata(zone string, name string, metadata ma
return nil
}
func (d *DriverMock) GetOSLoginUserFromGCE() string {
return d.OSLoginUserFromGCE
}

@ -5,7 +5,11 @@ import (
"crypto/sha256"
"encoding/hex"
"fmt"
"log"
"net/http"
"time"
metadata "cloud.google.com/go/compute/metadata"
"github.com/hashicorp/packer-plugin-sdk/multistep"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
"google.golang.org/api/oauth2/v2"
@ -37,7 +41,7 @@ func (s *StepImportOSLoginSSHKey) Run(ctx context.Context, state multistep.State
}
// Are we running packer on a GCE?
s.accountEmail = driver.GetOSLoginUserFromGCE()
s.accountEmail = getGCEUser()
if s.TokeninfoFunc == nil && s.accountEmail == "" {
s.TokeninfoFunc = tokeninfo
@ -140,3 +144,28 @@ func tokeninfo(ctx context.Context) (*oauth2.Tokeninfo, error) {
return svc.Tokeninfo().Context(ctx).Do()
}
// getGCEUser determines if we're running packer on a GCE, and if we are, gets the associated service account email for subsequent use with OSLogin.
// There are cases where we are running on a GCE, but the GCP metadata server isn't accessible. GitLab docker-engine runners are an edge case example of this.
// It makes little sense to run packer on GCP in this way; however, we defensively time out in those cases rather than abort.
func getGCEUser() string {
metadataCheckTimeout := 5 * time.Second
metadataCheckChl := make(chan string, 1)
go func() {
if metadata.OnGCE() {
GCEUser, _ := metadata.NewClient(&http.Client{}).Email("")
metadataCheckChl <- GCEUser
}
}()
select {
case thisGCEUser := <-metadataCheckChl:
log.Printf("[INFO] OSLogin: GCE service account %s will be used for identity", thisGCEUser)
return thisGCEUser
case <-time.After(metadataCheckTimeout):
log.Printf("[INFO] OSLogin: Could not derive a GCE service account from google metadata server after %s", metadataCheckTimeout)
return ""
}
}
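The channel above is buffered (capacity 1) so the metadata lookup goroutine can still complete its send even after the five-second timeout has fired. Purely for illustration, and not part of this commit, the timeout path could be exercised by a test like the following in a _test.go file of the same package; it assumes the test host is not a GCE instance, so metadata.OnGCE() reports false and nothing is ever sent on the channel.

func TestGetGCEUser_notOnGCE(t *testing.T) {
	// Off GCE the lookup goroutine never sends an email, so getGCEUser should
	// fall through to the timeout case after ~5s and report an empty user.
	if user := getGCEUser(); user != "" {
		t.Fatalf("expected an empty user when not on GCE, got %q", user)
	}
}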

@ -150,37 +150,3 @@ func TestStepImportOSLoginSSHKey_withPrivateSSHKey(t *testing.T) {
t.Errorf("expected to not see a public key when using a dedicated private key, but got %q", pubKey)
}
}
func TestStepImportOSLoginSSHKey_onGCE(t *testing.T) {
state := testState(t)
d := state.Get("driver").(*DriverMock)
step := new(StepImportOSLoginSSHKey)
defer step.Cleanup(state)
fakeAccountEmail := "testing@packer.io"
config := state.Get("config").(*Config)
config.UseOSLogin = true
config.Comm.SSHPublicKey = []byte{'k', 'e', 'y'}
d.OSLoginUserFromGCE = fakeAccountEmail
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
if step.accountEmail != fakeAccountEmail {
t.Fatalf("expected accountEmail to be %q but got %q", fakeAccountEmail, step.accountEmail)
}
pubKey, ok := state.GetOk("ssh_key_public_sha256")
if !ok {
t.Fatal("expected to see a public key")
}
sha256sum := sha256.Sum256(config.Comm.SSHPublicKey)
if pubKey != hex.EncodeToString(sha256sum[:]) {
t.Errorf("expected to see a matching public key, but got %q", pubKey)
}
}

@ -46,6 +46,7 @@ type FlatConfig struct {
ImageDiskFormat *string `mapstructure:"image_disk_format" required:"false" cty:"image_disk_format" hcl:"image_disk_format"`
ImageTags []string `mapstructure:"image_tags" required:"false" cty:"image_tags" hcl:"image_tags"`
ImageMinDisk *int `mapstructure:"image_min_disk" required:"false" cty:"image_min_disk" hcl:"image_min_disk"`
SkipCreateImage *bool `mapstructure:"skip_create_image" required:"false" cty:"skip_create_image" hcl:"skip_create_image"`
Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
@ -177,6 +178,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
"image_disk_format": &hcldec.AttrSpec{Name: "image_disk_format", Type: cty.String, Required: false},
"image_tags": &hcldec.AttrSpec{Name: "image_tags", Type: cty.List(cty.String), Required: false},
"image_min_disk": &hcldec.AttrSpec{Name: "image_min_disk", Type: cty.Number, Required: false},
"skip_create_image": &hcldec.AttrSpec{Name: "skip_create_image", Type: cty.Bool, Required: false},
"communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
"pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},

@ -33,6 +33,8 @@ type ImageConfig struct {
ImageTags []string `mapstructure:"image_tags" required:"false"`
// Minimum disk size needed to boot image, in gigabytes.
ImageMinDisk int `mapstructure:"image_min_disk" required:"false"`
// Skip creating the image. Useful for setting to `true` during a build test stage. Defaults to `false`.
SkipCreateImage bool `mapstructure:"skip_create_image" required:"false"`
}
func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error {

@ -12,10 +12,16 @@ import (
type stepAddImageMembers struct{}
func (s *stepAddImageMembers) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
imageId := state.Get("image").(string)
ui := state.Get("ui").(packersdk.Ui)
config := state.Get("config").(*Config)
if config.SkipCreateImage {
ui.Say("Skipping image add members...")
return multistep.ActionContinue
}
imageId := state.Get("image").(string)
if len(config.ImageMembers) == 0 {
return multistep.ActionContinue
}

@ -24,6 +24,11 @@ func (s *stepCreateImage) Run(ctx context.Context, state multistep.StateBag) mul
server := state.Get("server").(*servers.Server)
ui := state.Get("ui").(packersdk.Ui)
if config.SkipCreateImage {
ui.Say("Skipping image creation...")
return multistep.ActionContinue
}
// We need the v2 compute client
computeClient, err := config.computeV2Client()
if err != nil {
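Each of the OpenStack post-build steps now short-circuits on the new flag before any API client is created. As an illustrative sketch only (not part of this commit), the skip path of stepCreateImage could be unit-tested roughly as follows in a _test.go file of the openstack builder package, assuming the builder's Config embeds ImageConfig and that Run only reads the "config", "ui" and "server" state keys before the guard:

func TestStepCreateImage_skipCreateImage(t *testing.T) {
	state := new(multistep.BasicStateBag)
	state.Put("config", &Config{ImageConfig: ImageConfig{SkipCreateImage: true}})
	state.Put("ui", &packersdk.BasicUi{Writer: new(bytes.Buffer)})
	state.Put("server", &servers.Server{})

	step := new(stepCreateImage)
	if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
		t.Fatalf("expected ActionContinue when skip_create_image is set, got %#v", action)
	}
}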

@ -12,10 +12,16 @@ import (
type stepUpdateImageMinDisk struct{}
func (s *stepUpdateImageMinDisk) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
imageId := state.Get("image").(string)
ui := state.Get("ui").(packersdk.Ui)
config := state.Get("config").(*Config)
if config.SkipCreateImage {
ui.Say("Skipping image update mindisk...")
return multistep.ActionContinue
}
imageId := state.Get("image").(string)
if config.ImageMinDisk == 0 {
return multistep.ActionContinue
}

@ -13,10 +13,16 @@ import (
type stepUpdateImageTags struct{}
func (s *stepUpdateImageTags) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
imageId := state.Get("image").(string)
ui := state.Get("ui").(packersdk.Ui)
config := state.Get("config").(*Config)
if config.SkipCreateImage {
ui.Say("Skipping image update tags...")
return multistep.ActionContinue
}
imageId := state.Get("image").(string)
if len(config.ImageTags) == 0 {
return multistep.ActionContinue
}

@ -12,10 +12,16 @@ import (
type stepUpdateImageVisibility struct{}
func (s *stepUpdateImageVisibility) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
imageId := state.Get("image").(string)
ui := state.Get("ui").(packersdk.Ui)
config := state.Get("config").(*Config)
if config.SkipCreateImage {
ui.Say("Skipping image update visibility...")
return multistep.ActionContinue
}
imageId := state.Get("image").(string)
if config.ImageVisibility == "" {
return multistep.ActionContinue
}

@ -114,7 +114,7 @@ func (s *StepRegisterOMI) Cleanup(state multistep.StateBag) {
}
}
func (s *StepRegisterOMI) combineDevices(snapshotIds map[string]string) []osc.BlockDeviceMappingImage {
func (s *StepRegisterOMI) combineDevices(snapshotIDs map[string]string) []osc.BlockDeviceMappingImage {
devices := map[string]osc.BlockDeviceMappingImage{}
for _, device := range s.OMIDevices {
@ -125,12 +125,20 @@ func (s *StepRegisterOMI) combineDevices(snapshotIds map[string]string) []osc.Bl
// the same name in ami_block_device_mappings, except for the
// one designated as the root device in ami_root_device
for _, device := range s.LaunchDevices {
snapshotId, ok := snapshotIds[device.DeviceName]
snapshotID, ok := snapshotIDs[device.DeviceName]
if ok {
device.Bsu.SnapshotId = snapshotId
device.Bsu.SnapshotId = snapshotID
}
if device.DeviceName == s.RootDevice.SourceDeviceName {
device.DeviceName = s.RootDevice.DeviceName
if device.Bsu.VolumeType != "" {
device.Bsu.VolumeType = s.RootDevice.VolumeType
if device.Bsu.VolumeType != "io1" {
device.Bsu.Iops = 0
}
}
}
devices[device.DeviceName] = copyToDeviceMappingImage(device)
}
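The extra nesting drops the IOPS setting whenever the root-device override ends up with a volume type other than io1, where an explicit IOPS value is not accepted. A simplified, self-contained sketch of just that rule (type and field names here are shortened and hypothetical; the real code works on the block device mapping's Bsu):

package main

import "fmt"

type bsu struct {
	VolumeType string
	Iops       int64
}

func main() {
	// A launch device that requested io1 with 3000 IOPS...
	dev := bsu{VolumeType: "io1", Iops: 3000}
	// ...promoted to the root device, whose configured volume type is gp2.
	rootVolumeType := "gp2"
	if dev.VolumeType != "" {
		dev.VolumeType = rootVolumeType
		if dev.VolumeType != "io1" {
			dev.Iops = 0 // an explicit IOPS value only applies to io1 volumes
		}
	}
	fmt.Printf("%+v\n", dev) // prints {VolumeType:gp2 Iops:0}
}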

@ -72,6 +72,11 @@ func (s *stepFinalizeTemplateConfig) Run(ctx context.Context, state multistep.St
ui.Error(err.Error())
return multistep.ActionHalt
}
} else {
err := fmt.Errorf("cloud_init is set to true, but cloud_init_storage_pool is empty and could not be set automatically. set cloud_init_storage_pool in your configuration")
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}

@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
texttemplate "text/template"
@ -14,6 +15,7 @@ import (
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/hashicorp/packer-plugin-sdk/template"
hcl2shim "github.com/hashicorp/packer/hcl2template/shim"
"github.com/mitchellh/mapstructure"
"github.com/posener/complete"
"github.com/zclconf/go-cty/cty"
)
@ -55,7 +57,7 @@ func (c *HCL2UpgradeCommand) ParseArgs(args []string) (*HCL2UpgradeArgs, int) {
}
const (
hcl2UpgradeFileHeader = `# This file was autogenerated by the BETA 'packer hcl2_upgrade' command. We
hcl2UpgradeFileHeader = `# This file was autogenerated by the 'packer hcl2_upgrade' command. We
# recommend double checking that everything is correct before going forward. We
# also recommend treating this file as disposable. The HCL2 blocks in this
# file can be moved to other files. For example, the variable blocks could be
@ -91,10 +93,14 @@ const (
# https://www.packer.io/docs/templates/hcl_templates/blocks/build
build {
`
amazonAmiDataHeader = `
# The amazon-ami data block is generated from your amazon builder source_ami_filter; the data
# from this block can be referenced in source and locals blocks.
# Read the documentation for data blocks here:
# https://www.packer.io/docs/templates/hcl_templates/blocks/data`
)
func (c *HCL2UpgradeCommand) RunContext(buildCtx context.Context, cla *HCL2UpgradeArgs) int {
out := &bytes.Buffer{}
var output io.Writer
if err := os.MkdirAll(filepath.Dir(cla.OutputFile), 0); err != nil {
@ -179,11 +185,16 @@ func (c *HCL2UpgradeCommand) RunContext(buildCtx context.Context, cla *HCL2Upgra
for _, builder := range tpl.Builders {
builders = append(builders, builder)
}
sort.Slice(builders, func(i, j int) bool {
return builders[i].Type+builders[i].Name < builders[j].Type+builders[j].Name
})
}
if err := c.writeAmazonAmiDatasource(builders, out); err != nil {
return 1
}
sort.Slice(builders, func(i, j int) bool {
return builders[i].Type+builders[i].Name < builders[j].Type+builders[j].Name
})
out.Write([]byte(sourcesHeader))
for i, builderCfg := range builders {
@ -287,6 +298,61 @@ func (c *HCL2UpgradeCommand) RunContext(buildCtx context.Context, cla *HCL2Upgra
return 0
}
func (c *HCL2UpgradeCommand) writeAmazonAmiDatasource(builders []*template.Builder, out *bytes.Buffer) error {
amazonAmiFilters := []map[string]interface{}{}
first := true
i := 1
for _, builder := range builders {
if strings.HasPrefix(builder.Type, "amazon-") {
if sourceAmiFilter, ok := builder.Config["source_ami_filter"]; ok {
sourceAmiFilterCfg := map[string]interface{}{}
if err := mapstructure.Decode(sourceAmiFilter, &sourceAmiFilterCfg); err != nil {
c.Ui.Error(fmt.Sprintf("Failed to write amazon-ami data source: %v", err))
return err
}
duplicate := false
dataSourceName := fmt.Sprintf("autogenerated_%d", i)
for j, filter := range amazonAmiFilters {
if reflect.DeepEqual(filter, sourceAmiFilter) {
duplicate = true
dataSourceName = fmt.Sprintf("autogenerated_%d", j+1)
continue
}
}
// This is a hack...
// Use templating so that it could be correctly transformed later into a data resource
sourceAmiDataRef := fmt.Sprintf("{{ data `amazon-ami.%s.id` }}", dataSourceName)
if duplicate {
delete(builder.Config, "source_ami_filter")
builder.Config["source_ami"] = sourceAmiDataRef
continue
}
amazonAmiFilters = append(amazonAmiFilters, sourceAmiFilterCfg)
delete(builder.Config, "source_ami_filter")
builder.Config["source_ami"] = sourceAmiDataRef
i++
if first {
out.Write([]byte(amazonAmiDataHeader))
first = false
}
datasourceContent := hclwrite.NewEmptyFile()
body := datasourceContent.Body()
body.AppendNewline()
sourceBody := body.AppendNewBlock("data", []string{"amazon-ami", dataSourceName}).Body()
jsonBodyToHCL2Body(sourceBody, sourceAmiFilterCfg)
_, _ = out.Write(transposeTemplatingCalls(datasourceContent.Bytes()))
}
}
}
return nil
}
type UnhandleableArgumentError struct {
Call string
Correspondance string
@ -327,6 +393,9 @@ func transposeTemplatingCalls(s []byte) []byte {
"build": func(a string) string {
return fmt.Sprintf("${build.%s}", a)
},
"data": func(a string) string {
return fmt.Sprintf("${data.%s}", a)
},
"template_dir": func() string {
return fmt.Sprintf("${path.root}")
},
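The new "data" entry is what turns the placeholder injected by writeAmazonAmiDatasource, {{ data `amazon-ami.NAME.id` }}, into an HCL2 data-source reference during transposition. A minimal standalone sketch of that mapping with Go's text/template (names here are illustrative, not the command's actual plumbing):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("xpose").Funcs(template.FuncMap{
		"data": func(a string) string { return fmt.Sprintf("${data.%s}", a) },
	}).Parse("source_ami = \"{{ data `amazon-ami.autogenerated_1.id` }}\""))

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, nil); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // source_ami = "${data.amazon-ami.autogenerated_1.id}"
}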

@ -1,4 +1,4 @@
# This file was autogenerated by the BETA 'packer hcl2_upgrade' command. We
# This file was autogenerated by the 'packer hcl2_upgrade' command. We
# recommend double checking that everything is correct before going forward. We
# also recommend treating this file as disposable. The HCL2 blocks in this
# file can be moved to other files. For example, the variable blocks could be
@ -50,6 +50,20 @@ variable "secret_account" {
# "timestamp" template function replacement
locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") }
# The amazon-ami data block is generated from your amazon builder source_ami_filter; the data
# from this block can be referenced in source and locals blocks.
# Read the documentation for data blocks here:
# https://www.packer.io/docs/templates/hcl_templates/blocks/data
data "amazon-ami" "autogenerated_1" {
filters = {
name = "ubuntu/images/*/ubuntu-xenial-16.04-amd64-server-*"
root-device-type = "ebs"
virtualization-type = "hvm"
}
most_recent = true
owners = ["099720109477"]
}
# source blocks are generated from your builders; a source can be referenced in
# build blocks. A build block runs provisioner and post-processors on a
# source. Read the documentation for source blocks here:
@ -65,17 +79,37 @@ source "amazon-ebs" "autogenerated_1" {
volume_size = 48
volume_type = "gp2"
}
region = "${var.aws_region}"
secret_key = "${var.aws_secret_key}"
source_ami_filter {
filters = {
name = "ubuntu/images/*/ubuntu-xenial-16.04-amd64-server-*"
root-device-type = "ebs"
virtualization-type = "hvm"
region = "${var.aws_region}"
secret_key = "${var.aws_secret_key}"
source_ami = "${data.amazon-ami.autogenerated_1.id}"
spot_instance_types = ["t2.small", "t2.medium", "t2.large"]
spot_price = "0.0075"
ssh_interface = "session_manager"
ssh_username = "ubuntu"
temporary_iam_instance_profile_policy_document {
Statement {
Action = ["*"]
Effect = "Allow"
Resource = ["*"]
}
most_recent = true
owners = ["099720109477"]
Version = "2012-10-17"
}
}
source "amazon-ebs" "named_builder" {
access_key = "${var.aws_access_key}"
ami_description = "Ubuntu 16.04 LTS - expand root partition"
ami_name = "ubuntu-16-04-test-${local.timestamp}"
encrypt_boot = true
launch_block_device_mappings {
delete_on_termination = true
device_name = "/dev/sda1"
volume_size = 48
volume_type = "gp2"
}
region = "${var.aws_region}"
secret_key = "${var.aws_secret_key}"
source_ami = "${data.amazon-ami.autogenerated_1.id}"
spot_instance_types = ["t2.small", "t2.medium", "t2.large"]
spot_price = "0.0075"
ssh_interface = "session_manager"
@ -94,7 +128,7 @@ source "amazon-ebs" "autogenerated_1" {
# documentation for build blocks can be found here:
# https://www.packer.io/docs/templates/hcl_templates/blocks/build
build {
sources = ["source.amazon-ebs.autogenerated_1"]
sources = ["source.amazon-ebs.autogenerated_1", "source.amazon-ebs.named_builder"]
provisioner "shell" {
except = ["amazon-ebs"]

@ -61,6 +61,55 @@
]
},
"ssh_interface": "session_manager"
},
{
"type": "amazon-ebs",
"name": "named_builder",
"region": "{{ user `aws_region` }}",
"secret_key": "{{ user `aws_secret_key` }}",
"access_key": "{{ user `aws_access_key` }}",
"ami_name": "ubuntu-16-04-test-{{ timestamp }}",
"ami_description": "Ubuntu 16.04 LTS - expand root partition",
"source_ami_filter": {
"filters": {
"virtualization-type": "hvm",
"name": "ubuntu/images/*/ubuntu-xenial-16.04-amd64-server-*",
"root-device-type": "ebs"
},
"owners": [
"099720109477"
],
"most_recent": true
},
"launch_block_device_mappings": [
{
"delete_on_termination": true,
"device_name": "/dev/sda1",
"volume_type": "gp2",
"volume_size": 48
}
],
"spot_price": "0.0075",
"spot_instance_types": [
"t2.small",
"t2.medium",
"t2.large"
],
"encrypt_boot": true,
"ssh_username": "ubuntu",
"temporary_iam_instance_profile_policy_document": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"*"
],
"Resource": ["*"]
}
]
},
"ssh_interface": "session_manager"
}
],
"provisioners": [

@ -26,6 +26,12 @@
},
{
"pattern": "^/docs/extending/custom-datasources"
},
{
"pattern": "^/docs/templates/legacy_json_templates"
},
{
"pattern": "^https://packer.io/docs/templates/legacy_json_templates"
}
],
"replacementPatterns": [

@ -8,6 +8,8 @@ page_title: Builders - Templates
sidebar_title: Builders
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template Builders
Within the template, the builders section contains an array of all the builders

@ -6,6 +6,8 @@ page_title: Communicators - Templates
sidebar_title: Communicators
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template Communicators
Communicators are the mechanism Packer uses to upload files, execute scripts,

@ -7,6 +7,8 @@ page_title: Template Engine - Templates
sidebar_title: Engine
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template Engine
All strings within templates are processed by a common Packer templating

@ -10,6 +10,8 @@ page_title: JSON Templates
sidebar_title: JSON Templates
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# JSON Templates
These Packer templates are JSON files that configure the various components

@ -7,6 +7,8 @@ page_title: Post-Processors - Templates
sidebar_title: Post-Processors
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template Post-Processors
The post-processor section within a template configures any post-processing

@ -7,6 +7,8 @@ page_title: Provisioners - Templates
sidebar_title: Provisioners
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template Provisioners
Within the template, the provisioners section contains an array of all the

@ -9,6 +9,8 @@ page_title: User Variables - Templates
sidebar_title: User Variables
---
`@include 'from-1.5/legacy-json-warning.mdx'`
# Template User Variables
User variables allow your templates to be further configured with variables

@ -18,3 +18,5 @@
- `image_tags` ([]string) - List of tags to add to the image after creation.
- `image_min_disk` (int) - Minimum disk size needed to boot image, in gigabytes.
- `skip_create_image` (bool) - Skip creating the image. Useful for setting to `true` during a build test stage. Defaults to `false`.

@ -1,6 +1,7 @@
-> **Note:** This page is about HCL2 in Packer 1.5 and later. HCL2 support for
Packer is still in Beta. Please see the [Packer Issue
Tracker](https://github.com/hashicorp/packer/issues/9176) for a list of
supported features. For the old-style stable configuration language see
[template docs](/docs/templates). You can now transform your JSON file into an
HCL2 config file using the [hcl2_upgrade command](/docs/commands/hcl2_upgrade).
-> **Note:** This page is about HCL2 Packer templates. HCL2 templates
were first introduced as a beta feature into Packer version 1.5. As of v1.7,
HCL2 support is no longer in beta, and is the preferred way to write Packer
configuration. For the old-style stable configuration language see
[template docs](/docs/templates/legacy_json_templates). As of v1.6.2, you can
convert your legacy JSON template into an HCL2 config file using the
[hcl2_upgrade command](/docs/commands/hcl2_upgrade).

@ -0,0 +1,6 @@
-> **Note:** This page is about older-style JSON Packer templates. JSON
templates are still supported by the Packer core, but new features added to the
Packer core may not be implemented for JSON templates. We recommend you
transition to HCL templates as soon as is convenient for you, in order to have
the best possible experience with Packer. To help you upgrade your templates,
we have written an [hcl2_upgrade command](/docs/commands/hcl2_upgrade).