Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor build into stageBuilder type #343

Merged
merged 4 commits into from
Sep 13, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pkg/config/stage.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import "github.com/moby/buildkit/frontend/dockerfile/instructions"
// KanikoStage wraps a Dockerfile stage (instructions.Stage) with the
// kaniko-specific metadata computed in pkg/dockerfile when stages are parsed.
// NOTE(review): this span is a rendered GitHub diff; `FinalStage bool` below is
// the pre-rename field shown as removed and `Final bool` is its replacement
// (the matching call-site rename is visible in pkg/dockerfile/dockerfile.go).
type KanikoStage struct {
instructions.Stage
// BaseImageIndex is the index of the earlier stage this stage's base image
// comes from, or -1 when the base image is not built by a prior stage.
BaseImageIndex int
FinalStage bool
// Final reports whether this stage is the build's target stage
// (set from `index == targetStage` when stages are enumerated).
Final bool
// BaseImageStoredLocally is true when BaseImageIndex != -1, i.e. the base
// image was produced by an earlier stage and is available locally.
BaseImageStoredLocally bool
// SaveStage — presumably marks stages whose filesystem must be persisted
// for use by later stages; computed by saveStage(), not visible here. TODO confirm.
SaveStage bool
}
2 changes: 1 addition & 1 deletion pkg/dockerfile/dockerfile.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
BaseImageIndex: baseImageIndex(index, stages),
BaseImageStoredLocally: (baseImageIndex(index, stages) != -1),
SaveStage: saveStage(index, stages),
FinalStage: index == targetStage,
Final: index == targetStage,
})
if index == targetStage {
break
Expand Down
221 changes: 135 additions & 86 deletions pkg/executor/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

"github.com/GoogleContainerTools/kaniko/pkg/commands"
Expand All @@ -40,116 +41,164 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
// Parse dockerfile and unpack base image to root
stages, err := dockerfile.Stages(opts)
// stageBuilder contains all fields necessary to build one stage of a Dockerfile
type stageBuilder struct {
// stage is the Dockerfile stage (plus kaniko metadata) this builder executes.
stage config.KanikoStage
// image starts as the stage's source/base image and accumulates a new layer
// (via mutate.Append) for each command that changes the filesystem.
image v1.Image
// cf is the source image's config file; each command mutates cf.Config,
// and it is applied back onto the image when the stage finishes.
cf *v1.ConfigFile
// snapshotter captures filesystem state rooted at constants.RootDir to
// produce the layer contents after commands run.
snapshotter *snapshot.Snapshotter
// baseImageDigest is the string form of the source image's digest,
// recorded at construction time.
baseImageDigest string
}

// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
if err != nil {
return nil, err
}

imageConfig, err := util.RetrieveConfigFile(sourceImage)
if err != nil {
return nil, err
}
if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
return nil, err
}
hasher, err := getHasher(opts.SnapshotMode)
if err != nil {
return nil, err
}
for index, stage := range stages {
// Unpack file system to root
sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
l := snapshot.NewLayeredMap(hasher, util.CacheHasher())
snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)

digest, err := sourceImage.Digest()
if err != nil {
return nil, err
}
return &stageBuilder{
stage: stage,
image: sourceImage,
cf: imageConfig,
snapshotter: snapshotter,
baseImageDigest: digest.String(),
}, nil
}

// key will return a string representation of the build at the cmd
// TODO: priyawadhwa@ to fill this out when implementing caching
// func (s *stageBuilder) key(cmd string) (string, error) {
// return "", nil
// }

// extractCachedLayer will extract the cached layer and append it to the config file
// TODO: priyawadhwa@ to fill this out when implementing caching
// func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) error {
// return nil
// }

func (s *stageBuilder) build(opts *config.KanikoOptions) error {
// Unpack file system to root
if err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
}
// Take initial snapshot
if err := s.snapshotter.Init(); err != nil {
return err
}
args := dockerfile.NewBuildArgs(opts.BuildArgs)
for index, cmd := range s.stage.Commands {
finalCmd := index == len(s.stage.Commands)-1
command, err := commands.GetCommand(cmd, opts.SrcContext)
if err != nil {
return nil, err
}
if err := util.GetFSFromImage(constants.RootDir, sourceImage); err != nil {
return nil, err
return err
}
l := snapshot.NewLayeredMap(hasher, util.CacheHasher())
snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
// Take initial snapshot
if err := snapshotter.Init(); err != nil {
return nil, err
if command == nil {
continue
}
imageConfig, err := util.RetrieveConfigFile(sourceImage)
if err != nil {
return nil, err
}
if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
return nil, err
logrus.Info(command.String())
if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
return err
}
buildArgs := dockerfile.NewBuildArgs(opts.BuildArgs)
for index, cmd := range stage.Commands {
finalCmd := index == len(stage.Commands)-1
dockerCommand, err := commands.GetCommand(cmd, opts.SrcContext)
if err != nil {
return nil, err
}
if dockerCommand == nil {
continue
}
logrus.Info(dockerCommand.String())
if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil {
return nil, err
}
snapshotFiles := dockerCommand.FilesToSnapshot()
var contents []byte
files := command.FilesToSnapshot()
var contents []byte

// If this is an intermediate stage, we only snapshot for the last command and we
// want to snapshot the entire filesystem since we aren't tracking what was changed
// by previous commands.
if !stage.FinalStage {
// If this is an intermediate stage, we only snapshot for the last command and we
// want to snapshot the entire filesystem since we aren't tracking what was changed
// by previous commands.
if !s.stage.Final {
if finalCmd {
contents, err = s.snapshotter.TakeSnapshotFS()
}
} else {
// If we are in single snapshot mode, we only take a snapshot once, after all
// commands have completed.
if opts.SingleSnapshot {
if finalCmd {
contents, err = snapshotter.TakeSnapshotFS()
contents, err = s.snapshotter.TakeSnapshotFS()
}
} else {
// If we are in single snapshot mode, we only take a snapshot once, after all
// commands have completed.
if opts.SingleSnapshot {
if finalCmd {
contents, err = snapshotter.TakeSnapshotFS()
}
// Otherwise, in the final stage we take a snapshot at each command. If we know
// the files that were changed, we'll snapshot those explicitly, otherwise we'll
// check if anything in the filesystem changed.
if files != nil {
contents, err = s.snapshotter.TakeSnapshot(files)
} else {
// Otherwise, in the final stage we take a snapshot at each command. If we know
// the files that were changed, we'll snapshot those explicitly, otherwise we'll
// check if anything in the filesystem changed.
if snapshotFiles != nil {
contents, err = snapshotter.TakeSnapshot(snapshotFiles)
} else {
contents, err = snapshotter.TakeSnapshotFS()
}
contents, err = s.snapshotter.TakeSnapshotFS()
}
}
if err != nil {
return nil, fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
}
}
if err != nil {
return fmt.Errorf("Error taking snapshot of files for command %s: %s", command, err)
}

util.MoveVolumeWhitelistToWhitelist()
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
continue
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return nil, err
}
sourceImage, err = mutate.Append(sourceImage,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: dockerCommand.String(),
},
util.MoveVolumeWhitelistToWhitelist()
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
continue
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return err
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: command.String(),
},
)
if err != nil {
return nil, err
}
},
)
if err != nil {
return err
}
}
return nil
}

// DoBuild executes building the Dockerfile
func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
// Parse dockerfile and unpack base image to root
stages, err := dockerfile.Stages(opts)
if err != nil {
return nil, err
}
for index, stage := range stages {
sb, err := newStageBuilder(opts, stage)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
}
if err := sb.build(opts); err != nil {
return nil, errors.Wrap(err, "error building stage")
}
sourceImage, err = mutate.Config(sourceImage, imageConfig.Config)
sourceImage, err := mutate.Config(sb.image, sb.cf.Config)
if err != nil {
return nil, err
}
if stage.FinalStage {
if stage.Final {
sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()})
if err != nil {
return nil, err
Expand Down