Skip to content

Commit

Permalink
hcp: extract all HCP-related code to hcp package
Browse files Browse the repository at this point in the history
As part of the work to extract everything related to HCP from scattered
places around the Packer code, we move it all to an hcp package.

This in turn reduces the amount of code that the commands have to
integrate, and keeps the HCP details in their own enclave.
  • Loading branch information
lbajolet-hashicorp committed Sep 7, 2022
1 parent cd1ee3a commit dc62051
Show file tree
Hide file tree
Showing 20 changed files with 440 additions and 727 deletions.
2 changes: 1 addition & 1 deletion acctest/testing.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ func Test(t TestT, c TestCase) {
},
Template: tpl,
})
err = core.Initialize()
err = core.Initialize(packer.InitializeOptions{})
if err != nil {
t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
return
Expand Down
96 changes: 29 additions & 67 deletions command/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (

"github.com/hashicorp/hcl/v2"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
"github.com/hashicorp/packer/hcp"
"github.com/hashicorp/packer/packer"
"golang.org/x/sync/semaphore"

Expand Down Expand Up @@ -88,33 +89,16 @@ func (c *BuildCommand) RunContext(buildCtx context.Context, cla *BuildArgs) int
return ret
}

diags = TrySetupHCP(packerStarter)
ret = writeDiags(c.Ui, nil, diags)
if ret != 0 {
return ret
}

// This build currently enforces a 1:1 mapping that one publisher can be assigned to a single packer config file.
// It also requires that each config type implements this ConfiguredArtifactMetadataPublisher to return a configured bucket.
// TODO find an option that is not managed by a globally shared Publisher.
ArtifactMetadataPublisher, diags := packerStarter.ConfiguredArtifactMetadataPublisher()
if diags.HasErrors() {
return writeDiags(c.Ui, nil, diags)
hcpHandler, err := hcp.GetHCPHandler(packerStarter)
if err != nil {
c.Ui.Error(fmt.Sprintf("HCP setup failed: %s", err))
return -1
}

// We need to create a bucket and an empty iteration before we retrieve builds
// so that we can add the iteration ID to the build's eval context
if ArtifactMetadataPublisher != nil {
if err := ArtifactMetadataPublisher.Initialize(buildCtx); err != nil {
diags := hcl.Diagnostics{
&hcl.Diagnostic{
Summary: "HCP Packer Registry iteration initialization failed",
Detail: fmt.Sprintf("Failed to initialize iteration for %q\n %s", ArtifactMetadataPublisher.Slug, err),
Severity: hcl.DiagError,
},
}
return writeDiags(c.Ui, nil, diags)
}
err = hcpHandler.PopulateIteration(buildCtx)
if err != nil {
c.Ui.Error(fmt.Sprintf("HCP populating iteration failed: %s", err))
return -1
}

builds, hcpMap, diags := packerStarter.GetBuilds(packer.GetBuildsOptions{
Expand All @@ -133,21 +117,6 @@ func (c *BuildCommand) RunContext(buildCtx context.Context, cla *BuildArgs) int
c.Ui.Say("Debug mode enabled. Builds will not be parallelized.")
}

// Now that builds have been retrieved, we can populate the iteration with
// the builds we expect to run.
if ArtifactMetadataPublisher != nil {
if err := ArtifactMetadataPublisher.PopulateIteration(buildCtx); err != nil {
diags := hcl.Diagnostics{
&hcl.Diagnostic{
Summary: "HCP Packer Registry build initialization failed",
Detail: fmt.Sprintf("Failed to initialize build for %q\n %s", ArtifactMetadataPublisher.Slug, err),
Severity: hcl.DiagError,
},
}
return writeDiags(c.Ui, nil, diags)
}
}

// Compile all the UIs for the builds
colors := [5]packer.UiColor{
packer.UiColorGreen,
Expand Down Expand Up @@ -225,26 +194,20 @@ func (c *BuildCommand) RunContext(buildCtx context.Context, cla *BuildArgs) int
// Get the start of the build
buildStart := time.Now()

err := hcpHandler.BuildStart(buildCtx, hcpMap[name])
if err != nil {
if strings.Contains(err.Error(), "already done") {
ui.Say(fmt.Sprintf(
"skipping HCP-enabled build %q: already done.",
name))
return
}
}

defer wg.Done()

defer limitParallel.Release(1)

if ArtifactMetadataPublisher != nil {
err := ArtifactMetadataPublisher.BuildStart(buildCtx, hcpMap[name])
if err != nil {
msg := err.Error()
if strings.Contains(msg, "already done") {
ui.Say(fmt.Sprintf(
"Build %q already done for bucket %q, skipping to prevent drift: %q",
name,
ArtifactMetadataPublisher.Slug,
err))
return
}

}
}

log.Printf("Starting build run: %s", name)
runArtifacts, err := b.Run(buildCtx, ui)

Expand All @@ -253,17 +216,16 @@ func (c *BuildCommand) RunContext(buildCtx context.Context, cla *BuildArgs) int
buildDuration := buildEnd.Sub(buildStart)
fmtBuildDuration := durafmt.Parse(buildDuration).LimitFirstN(2)

if ArtifactMetadataPublisher != nil {
runArtifacts, err = ArtifactMetadataPublisher.BuildDone(
buildCtx,
hcpMap[name],
runArtifacts,
err,
)
if err != nil {
ui.Error(fmt.Sprintf("failed to complete HCP build %q: %s",
name, err))
}
runArtifacts, hcperr := hcpHandler.BuildDone(
buildCtx,
hcpMap[name],
runArtifacts,
err)
if hcperr != nil {
ui.Error(fmt.Sprintf(
"failed to complete HCP-enabled build %q: %s.",
name,
hcperr))
}

if err != nil {
Expand Down
68 changes: 0 additions & 68 deletions command/core_wrapper.go

This file was deleted.

4 changes: 2 additions & 2 deletions command/hcl2_upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,8 +158,8 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
return 1
}

core := hdl.(*CoreWrapper).Core
if err := core.Initialize(); err != nil {
core := hdl.(*packer.Core)
if err := core.Initialize(packer.InitializeOptions{}); err != nil {
c.Ui.Error(fmt.Sprintf("Ignoring following initialization error: %v", err))
}
tpl := core.Template
Expand Down
2 changes: 1 addition & 1 deletion command/meta.go
Original file line number Diff line number Diff line change
Expand Up @@ -169,5 +169,5 @@ func (m *Meta) GetConfigFromJSON(cla *MetaArgs) (packer.Handler, int) {
m.Ui.Error(err.Error())
ret = 1
}
return &CoreWrapper{core}, ret
return core, ret
}
Loading

0 comments on commit dc62051

Please sign in to comment.