 .gitignore                           |  51
 LICENSE                              | 201
 README.md                            |  13
 TODO                                 |  56
 action.go                            |  67
 actions/actions_doc.go               |   6
 actions/apt_action.go                |  61
 actions/debootstrap_action.go        | 146
 actions/download_action.go           | 168
 actions/filesystem_deploy_action.go  | 131
 actions/image_partition_action.go    | 432
 actions/ostree_commit_action.go      |  84
 actions/ostree_deploy_action.go      | 183
 actions/overlay_action.go            |  67
 actions/pack_action.go               |  39
 actions/raw_action.go                | 130
 actions/run_action.go                | 132
 actions/unpack_action.go             | 100
 archiver.go                          | 217
 archiver_test.go                     | 157
 cmd/debos/debos.go                   | 219
 commands.go                          | 203
 commands_test.go                     |   9
 debug.go                             |  31
 filesystem.go                        | 105
 net.go                               |  45
 os.go                                |  74
 recipe/recipe.go                     | 175
 recipe/recipe_test.go                | 182
 29 files changed, 3484 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..32eae19
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,51 @@
+
+# Created by https://www.gitignore.io/api/vim,linux,go
+
+### Go ###
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Golang project vendor packages which should be ignored
+vendor/
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### Vim ###
+# swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+# session
+Session.vim
+# temporary
+.netrwhist
+# auto-generated tag files
+tags
+
+# End of https://www.gitignore.io/api/vim,linux,go
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3defeeb
--- /dev/null
+++ b/README.md
@@ -0,0 +1,13 @@
+debos
+=====
+
+Debian OS builder. debos is a tool that makes creating various Debian-based OS
+"images" simpler. While most other tools focus on a specific use case, debos is
+meant more as a toolchain that makes common actions trivial while providing
+enough rope to do whatever tweaking might be required behind the scenes.
+
+debos expects a YAML file as input; the syntax description can be found at:
+ https://godoc.org/github.com/go-debos/debos/actions
+
+and examples can be found at:
+ https://github.com/go-debos/debos-recipes
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..36f33bf
--- /dev/null
+++ b/TODO
@@ -0,0 +1,56 @@
+Before 1.0
+* Rename to debos
+
+Potentially After 1.0
+
+* Make logging more consistent
+* Have a mode to output the final yaml after templating and then exit (dry-run?)
+
+
+* Documentation of all actions and overall concepts!
+* Tests; individual actions are mostly testable.
+
+* Support having devices as image files (fakemachine should handle it ok)
+
+* Create a standard docker image with debos to push to docker hub for easy
+ image creation
+
+* Template function to include other files
+* Dependency system between recipes ?
+
+* Warn on unknown yaml fields?
+
+* New actions:
+** Create a manifest from installed Debian packages
+** Install all dbgsym packages for a given rootfs
+** Install all dev packages for installed libraries
+** Generalize the ostree conversion into an action?
+** Ostree checkout for incremental fast updates?
+** Action to get remote content which can be copied in
+*** Download tarball from http and unpack (same content)
+*** Download git tree (e.g. to get rpi firmware)
+*** Download .deb (e.g. u-boot to raw write rather than install)
+
+* Check what triggers ostree changes and try to minimize those
+
+* Control passwd & group contents as that can be problematic with ostree
+
+* Rewrite debootstrap in pure go to add a bunch of smarts (e.g. parallel
+ downloads, local caching etc)
+
+* Rewrite pack/unpack in pure go and support more formats
+
+
+* Make actions using (host) commands check their existence early
+
+
+* Ensure we copy xattrs?
+
+* Fix race in qemu-helper (if qemu-user-static gets installed in the system
+ chroot things will get confused)
+
+* Do shell compatible parsing of script: argument to run actions and
+ environment substitution
+
+* Support gpg signing ostree commits
+
diff --git a/action.go b/action.go
new file mode 100644
index 0000000..f7c4062
--- /dev/null
+++ b/action.go
@@ -0,0 +1,67 @@
+package debos
+
+import (
+ "bytes"
+ "github.com/go-debos/fakemachine"
+ "log"
+)
+
+// Mapping from partition name as configured in the image-partition action to
+// device path for usage by other actions
+type Partition struct {
+ Name string
+ DevicePath string
+}
+
+type DebosContext struct {
+ Scratchdir string
+ Rootdir string
+ Artifactdir string
+ Downloaddir string
+ Image string
+ ImagePartitions []Partition
+ ImageMntDir string
+ ImageFSTab bytes.Buffer // Fstab as per partitioning
+ ImageKernelRoot string // Kernel cmdline root= snippet for the / of the image
+ RecipeDir string
+ Architecture string
+ DebugShell string
+ Origins map[string]string
+}
+
+type Action interface {
+ /* FIXME verify should probably be prepare or somesuch */
+ Verify(context *DebosContext) error
+ PreMachine(context *DebosContext, m *fakemachine.Machine, args *[]string) error
+ PreNoMachine(context *DebosContext) error
+ Run(context *DebosContext) error
+ Cleanup(context DebosContext) error
+ PostMachine(context DebosContext) error
+ String() string
+}
+
+type BaseAction struct {
+ Action string
+ Description string
+}
+
+func (b *BaseAction) LogStart() {
+ log.Printf("==== %s ====\n", b)
+}
+
+func (b *BaseAction) Verify(context *DebosContext) error { return nil }
+func (b *BaseAction) PreMachine(context *DebosContext,
+ m *fakemachine.Machine,
+ args *[]string) error {
+ return nil
+}
+func (b *BaseAction) PreNoMachine(context *DebosContext) error { return nil }
+func (b *BaseAction) Run(context *DebosContext) error { return nil }
+func (b *BaseAction) Cleanup(context DebosContext) error { return nil }
+func (b *BaseAction) PostMachine(context DebosContext) error { return nil }
+func (b *BaseAction) String() string {
+ if b.Description == "" {
+ return b.Action
+ }
+ return b.Description
+}
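To illustrate the pattern established by action.go: concrete actions embed BaseAction so they inherit no-op defaults for the whole Action interface and only override the hooks they need, together with yaml-tagged fields carrying their recipe properties. The HostnameAction below is a hypothetical sketch, not part of this commit; its name and behaviour are invented for illustration and it only relies on the types defined above. A real action would presumably also need to be registered in the recipe parsing code (recipe/recipe.go, not shown in this section).

    package actions

    import (
        "io/ioutil"
        "path"

        "github.com/go-debos/debos"
    )

    // HostnameAction is an illustrative custom action: BaseAction supplies
    // no-op implementations of the Action interface, so only Run is overridden.
    type HostnameAction struct {
        debos.BaseAction `yaml:",inline"`
        Hostname         string
    }

    func (h *HostnameAction) Run(context *debos.DebosContext) error {
        h.LogStart()
        // Write the configured hostname into the target root filesystem.
        hostnamePath := path.Join(context.Rootdir, "etc/hostname")
        return ioutil.WriteFile(hostnamePath, []byte(h.Hostname+"\n"), 0644)
    }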
diff --git a/actions/actions_doc.go b/actions/actions_doc.go
new file mode 100644
index 0000000..7ebd532
--- /dev/null
+++ b/actions/actions_doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017, Collabora Ltd.
+
+/*
+Package 'actions' implements 'debos' modules used for OS creation.
+*/
+package actions
diff --git a/actions/apt_action.go b/actions/apt_action.go
new file mode 100644
index 0000000..681c069
--- /dev/null
+++ b/actions/apt_action.go
@@ -0,0 +1,61 @@
+/*
+Apt Action
+
+Install packages and their dependencies to the target rootfs with 'apt'.
+
+Yaml syntax:
+ - action: apt
+ recommends: bool
+ packages:
+ - package1
+ - package2
+
+Mandatory properties:
+
+- packages -- list of packages to install
+
+Optional properties:
+
+- recommends -- boolean indicating whether recommended packages will be installed
+*/
+package actions
+
+import (
+ "github.com/go-debos/debos"
+)
+
+type AptAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Recommends bool
+ Packages []string
+}
+
+func (apt *AptAction) Run(context *debos.DebosContext) error {
+ apt.LogStart()
+ aptOptions := []string{"apt-get", "-y"}
+
+ if !apt.Recommends {
+ aptOptions = append(aptOptions, "--no-install-recommends")
+ }
+
+ aptOptions = append(aptOptions, "install")
+ aptOptions = append(aptOptions, apt.Packages...)
+
+ c := debos.NewChrootCommandForContext(*context)
+ c.AddEnv("DEBIAN_FRONTEND=noninteractive")
+
+ err := c.Run("apt", "apt-get", "update")
+ if err != nil {
+ return err
+ }
+ err = c.Run("apt", aptOptions...)
+ if err != nil {
+ return err
+ }
+ err = c.Run("apt", "apt-get", "clean")
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/actions/debootstrap_action.go b/actions/debootstrap_action.go
new file mode 100644
index 0000000..a53d153
--- /dev/null
+++ b/actions/debootstrap_action.go
@@ -0,0 +1,146 @@
+/*
+Debootstrap Action
+
+Construct the target rootfs with debootstrap tool.
+
+Yaml syntax:
+ - action: debootstrap
+ mirror: URL
+ suite: "name"
+ components: <list of components>
+ variant: "name"
+ keyring-package:
+
+Mandatory properties:
+
+- suite -- release code name or symbolic name (e.g. "stable")
+
+Optional properties:
+
+- mirror -- URL with Debian-compatible repository
+
+- variant -- name of the bootstrap script variant to use
+
+- components -- list of components to use for package selection.
+Example:
+ components: [ main, contrib ]
+
+- keyring-package -- keyring for packages validation. Currently ignored.
+
+- merged-usr -- use merged '/usr' filesystem, true by default.
+*/
+package actions
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/go-debos/debos"
+)
+
+type DebootstrapAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Suite string
+ Mirror string
+ Variant string
+ KeyringPackage string `yaml:"keyring-package"`
+ Components []string
+ MergedUsr bool `yaml:"merged-usr"`
+}
+
+func NewDebootstrapAction() *DebootstrapAction {
+ d := DebootstrapAction{}
+ // Use filesystem with merged '/usr' by default
+ d.MergedUsr = true
+ return &d
+
+}
+
+func (d *DebootstrapAction) RunSecondStage(context debos.DebosContext) error {
+ cmdline := []string{
+ "/debootstrap/debootstrap",
+ "--no-check-gpg",
+ "--second-stage"}
+
+ if d.Components != nil {
+ s := strings.Join(d.Components, ",")
+ cmdline = append(cmdline, fmt.Sprintf("--components=%s", s))
+ }
+
+ c := debos.NewChrootCommandForContext(context)
+ // Can't use nspawn for debootstrap as it wants to create device nodes
+ c.ChrootMethod = debos.CHROOT_METHOD_CHROOT
+
+ return c.Run("Debootstrap (stage 2)", cmdline...)
+}
+
+func (d *DebootstrapAction) Run(context *debos.DebosContext) error {
+ d.LogStart()
+ cmdline := []string{"debootstrap", "--no-check-gpg"}
+
+ if d.MergedUsr {
+ cmdline = append(cmdline, "--merged-usr")
+ }
+
+ if d.KeyringPackage != "" {
+ cmdline = append(cmdline, fmt.Sprintf("--keyring=%s", d.KeyringPackage))
+ }
+
+ if d.Components != nil {
+ s := strings.Join(d.Components, ",")
+ cmdline = append(cmdline, fmt.Sprintf("--components=%s", s))
+ }
+
+ /* FIXME drop the hardcoded amd64 assumption */
+ foreign := context.Architecture != "amd64"
+
+ if foreign {
+ cmdline = append(cmdline, "--foreign")
+ cmdline = append(cmdline, fmt.Sprintf("--arch=%s", context.Architecture))
+
+ }
+
+ if d.Variant != "" {
+ cmdline = append(cmdline, fmt.Sprintf("--variant=%s", d.Variant))
+ }
+
+ cmdline = append(cmdline, d.Suite)
+ cmdline = append(cmdline, context.Rootdir)
+ cmdline = append(cmdline, d.Mirror)
+ cmdline = append(cmdline, "/usr/share/debootstrap/scripts/unstable")
+
+ err := debos.Command{}.Run("Debootstrap", cmdline...)
+
+ if err != nil {
+ return err
+ }
+
+ if foreign {
+ err = d.RunSecondStage(*context)
+ if err != nil {
+ return err
+ }
+ }
+
+ /* HACK */
+ srclist, err := os.OpenFile(path.Join(context.Rootdir, "etc/apt/sources.list"),
+ os.O_RDWR|os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ _, err = io.WriteString(srclist, fmt.Sprintf("deb %s %s %s\n",
+ d.Mirror,
+ d.Suite,
+ strings.Join(d.Components, " ")))
+ if err != nil {
+ return err
+ }
+ srclist.Close()
+
+ c := debos.NewChrootCommandForContext(*context)
+
+ return c.Run("apt clean", "/usr/bin/apt-get", "clean")
+}
diff --git a/actions/download_action.go b/actions/download_action.go
new file mode 100644
index 0000000..a4099e7
--- /dev/null
+++ b/actions/download_action.go
@@ -0,0 +1,168 @@
+/*
+Download Action
+
+Download a single file from the Internet and unpack it in place if needed.
+
+Yaml syntax:
+ - action: download
+ url: http://example.domain/path/filename.ext
+ name: firmware
+ filename: output_name
+ unpack: bool
+ compression: gz
+
+Mandatory properties:
+
+- url -- URL to an object for download
+
+- name -- string which allows the downloaded object to be used in other actions
+via the 'origin' property. If the 'unpack' property is set to 'true', the name
+will refer to a temporary directory with the extracted content.
+
+Optional properties:
+
+- filename -- use this property as the name for the saved file. Useful if the URL
+does not contain a file name in its path; for example, some URLs have no path part at all.
+
+- unpack -- hint for the action to extract all files from the downloaded archive.
+See the 'Unpack' action for more information.
+
+- compression -- optional hint for unpack to select the proper compression method.
+See the 'Unpack' action for more information.
+*/
+package actions
+
+import (
+ "fmt"
+ "github.com/go-debos/debos"
+ "net/url"
+ "path"
+)
+
+type DownloadAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Url string // URL for downloading
+ Filename string // File name, overrides the name from URL.
+ Unpack bool // Unpack downloaded file to directory dedicated for download
+ Compression string // compression type
+ Name string // exporting path to file or directory(in case of unpack)
+}
+
+// validateUrl checks if supported URL is passed from recipe
+// Return:
+// - parsed URL
+// - nil in case of success
+func (d *DownloadAction) validateUrl() (*url.URL, error) {
+
+ url, err := url.Parse(d.Url)
+ if err != nil {
+ return url, err
+ }
+
+ switch url.Scheme {
+ case "http", "https":
+ // Supported scheme
+ default:
+ return url, fmt.Errorf("Unsupported URL is provided: '%s'", url.String())
+ }
+
+ return url, nil
+}
+
+func (d *DownloadAction) validateFilename(context *debos.DebosContext, url *url.URL) (filename string, err error) {
+ if len(d.Filename) == 0 {
+ // Trying to guess the name from URL Path
+ filename = path.Base(url.Path)
+ } else {
+ filename = path.Base(d.Filename)
+ }
+ if len(filename) == 0 {
+ return "", fmt.Errorf("Incorrect filename is provided for '%s'", d.Url)
+ }
+ filename = path.Join(context.Scratchdir, filename)
+ return filename, nil
+}
+
+func (d *DownloadAction) archive(filename string) (debos.Archive, error) {
+ archive, err := debos.NewArchive(filename)
+ if err != nil {
+ return archive, err
+ }
+ switch archive.Type() {
+ case debos.Tar:
+ if len(d.Compression) > 0 {
+ if err := archive.AddOption("tarcompression", d.Compression); err != nil {
+ return archive, err
+ }
+ }
+ default:
+ }
+ return archive, nil
+}
+
+func (d *DownloadAction) Verify(context *debos.DebosContext) error {
+ var filename string
+
+ if len(d.Name) == 0 {
+ return fmt.Errorf("Property 'name' is mandatory for download action\n")
+ }
+
+ url, err := d.validateUrl()
+ if err != nil {
+ return err
+ }
+ filename, err = d.validateFilename(context, url)
+ if err != nil {
+ return err
+ }
+ if d.Unpack == true {
+ if _, err := d.archive(filename); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *DownloadAction) Run(context *debos.DebosContext) error {
+ var filename string
+ d.LogStart()
+
+ url, err := d.validateUrl()
+ if err != nil {
+ return err
+ }
+
+ filename, err = d.validateFilename(context, url)
+ if err != nil {
+ return err
+ }
+ originPath := filename
+
+ switch url.Scheme {
+ case "http", "https":
+ err := debos.DownloadHttpUrl(url.String(), filename)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("Unsupported URL is provided: '%s'", url.String())
+ }
+
+ if d.Unpack == true {
+ archive, err := d.archive(filename)
+ if err != nil {
+ return err
+ }
+
+ targetdir := filename + ".d"
+ err = archive.RelaxedUnpack(targetdir)
+ if err != nil {
+ return err
+ }
+ originPath = targetdir
+ }
+
+ context.Origins[d.Name] = originPath
+
+ return nil
+}
diff --git a/actions/filesystem_deploy_action.go b/actions/filesystem_deploy_action.go
new file mode 100644
index 0000000..bb89a83
--- /dev/null
+++ b/actions/filesystem_deploy_action.go
@@ -0,0 +1,131 @@
+/*
+FilesystemDeploy Action
+
+Deploy prepared root filesystem to output image. This action requires
+'image-partition' action to be executed before it.
+
+Yaml syntax:
+ - action: filesystem-deploy
+ setup-fstab: bool
+ setup-kernel-cmdline: bool
+
+Optional properties:
+
+- setup-fstab -- generate the '/etc/fstab' file according to information provided
+by the 'image-partition' action. Defaults to 'true'.
+
+- setup-kernel-cmdline -- add the location of the root partition to the
+'/etc/kernel/cmdline' file on the target image. Defaults to 'true'.
+*/
+package actions
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/go-debos/debos"
+)
+
+type FilesystemDeployAction struct {
+ debos.BaseAction `yaml:",inline"`
+ SetupFSTab bool `yaml:"setup-fstab"`
+ SetupKernelCmdline bool `yaml:"setup-kernel-cmdline"`
+}
+
+func NewFilesystemDeployAction() *FilesystemDeployAction {
+ fd := &FilesystemDeployAction{SetupFSTab: true, SetupKernelCmdline: true}
+ fd.Description = "Deploying filesystem"
+
+ return fd
+}
+
+func (fd *FilesystemDeployAction) setupFSTab(context *debos.DebosContext) error {
+ if context.ImageFSTab.Len() == 0 {
+ return errors.New("Fstab not generated, missing image-partition action?")
+ }
+
+ log.Print("Setting up fstab")
+
+ err := os.MkdirAll(path.Join(context.Rootdir, "etc"), 0755)
+ if err != nil {
+ return fmt.Errorf("Couldn't create etc in image: %v", err)
+ }
+
+ fstab := path.Join(context.Rootdir, "etc/fstab")
+ f, err := os.OpenFile(fstab, os.O_RDWR|os.O_CREATE, 0755)
+
+ if err != nil {
+ return fmt.Errorf("Couldn't open fstab: %v", err)
+ }
+
+ _, err = io.Copy(f, &context.ImageFSTab)
+
+ if err != nil {
+ return fmt.Errorf("Couldn't write fstab: %v", err)
+ }
+ f.Close()
+
+ return nil
+}
+
+func (fd *FilesystemDeployAction) setupKernelCmdline(context *debos.DebosContext) error {
+ log.Print("Setting up /etc/kernel/cmdline")
+
+ err := os.MkdirAll(path.Join(context.Rootdir, "etc", "kernel"), 0755)
+ if err != nil {
+ return fmt.Errorf("Couldn't create etc/kernel in image: %v", err)
+ }
+ path := path.Join(context.Rootdir, "etc/kernel/cmdline")
+ current, _ := ioutil.ReadFile(path)
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0755)
+
+ if err != nil {
+ log.Fatalf("Couldn't open kernel cmdline: %v", err)
+ }
+
+ cmdline := fmt.Sprintf("%s %s\n",
+ strings.TrimSpace(string(current)),
+ context.ImageKernelRoot)
+
+ _, err = f.WriteString(cmdline)
+ if err != nil {
+ return fmt.Errorf("Couldn't write kernel/cmdline: %v", err)
+ }
+
+ f.Close()
+ return nil
+}
+
+func (fd *FilesystemDeployAction) Run(context *debos.DebosContext) error {
+ fd.LogStart()
+ /* Copying files is actually surprisingly hard: one has to keep permissions,
+ * ACLs, extended attributes and so on. Leave it to cp...
+ */
+ err := debos.Command{}.Run("Deploy to image", "cp", "-a", context.Rootdir+"/.", context.ImageMntDir)
+ if err != nil {
+ return fmt.Errorf("rootfs deploy failed: %v", err)
+ }
+ context.Rootdir = context.ImageMntDir
+ context.Origins["filesystem"] = context.ImageMntDir
+
+ if fd.SetupFSTab {
+ err = fd.setupFSTab(context)
+ if err != nil {
+ return err
+ }
+ }
+ if fd.SetupKernelCmdline {
+ err = fd.setupKernelCmdline(context)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/actions/image_partition_action.go b/actions/image_partition_action.go
new file mode 100644
index 0000000..5054e6d
--- /dev/null
+++ b/actions/image_partition_action.go
@@ -0,0 +1,432 @@
+/*
+ImagePartition Action
+
+This action creates an image file, partitions it and formats the filesystems.
+
+Yaml syntax:
+ - action: image-partition
+ imagename: image_name
+ imagesize: size
+ partitiontype: gpt
+ gpt_gap: offset
+ partitions:
+ <list of partitions>
+ mountpoints:
+ <list of mount points>
+
+Mandatory properties:
+
+- imagename -- the name of the image file.
+
+- imagesize -- generated image size in human-readable form, examples: 100MB, 1GB, etc.
+
+- partitiontype -- partition table type. Currently only 'gpt' and 'msdos'
+partition tables are supported.
+
+- gpt_gap -- shifting the GPT start allows this gap to be used for bootloaders,
+for example if U-Boot intersects with the original GPT placement.
+Only works if parted supports an extra argument to mklabel to specify the GPT offset.
+
+- partitions -- list of partitions, at least one partition is needed.
+Partition properties are described below.
+
+- mountpoints -- list of mount points for partitions.
+Properties for mount points are described below.
+
+Yaml syntax for partitions:
+
+ partitions:
+ - name: partition name
+ fs: filesystem
+ start: offset
+ end: offset
+ flags: list of flags
+
+Mandatory properties:
+
+- name -- used for referencing the named partition in the mount points
+configuration (below) and as the label of the filesystem on this partition.
+
+- fs -- filesystem type used for formatting.
+
+The 'none' fs type should be used for a partition without a filesystem.
+
+- start -- offset from the beginning of the disk where the partition starts.
+
+- end -- offset from the beginning of the disk where the partition ends.
+
+For the 'start' and 'end' properties the offset can be written in human-readable
+form -- '32MB', '1GB' -- or as a disk percentage -- '100%'.
+
+Optional properties:
+
+- flags -- list of additional flags for partition compatible with parted(8)
+'set' command.
+
+Yaml syntax for mount points:
+
+ mountpoints:
+ - mountpoint: path
+ partition: partition label
+ options: list of options
+
+Mandatory properties:
+
+- partition -- partition name for mounting.
+
+- mountpoint -- path in the target root filesystem where the named partition
+should be mounted.
+
+Optional properties:
+
+- options -- list of options to be added to the appropriate entry in the fstab file.
+
+Layout example for a Raspberry Pi 3:
+
+ - action: image-partition
+ imagename: "debian-rpi3.img"
+ imagesize: 1GB
+ partitiontype: msdos
+ mountpoints:
+ - mountpoint: /
+ partition: root
+ - mountpoint: /boot/firmware
+ partition: firmware
+ options: [ x-systemd.automount ]
+ partitions:
+ - name: firmware
+ fs: vfat
+ start: 0%
+ end: 64MB
+ - name: root
+ fs: ext4
+ start: 64MB
+ end: 100%
+ flags: [ boot ]
+*/
+package actions
+
+import (
+ "errors"
+ "fmt"
+ "github.com/docker/go-units"
+ "github.com/go-debos/fakemachine"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+ "syscall"
+
+ "github.com/go-debos/debos"
+)
+
+type Partition struct {
+ number int
+ Name string
+ Start string
+ End string
+ FS string
+ Flags []string
+ FSUUID string
+}
+
+type Mountpoint struct {
+ Mountpoint string
+ Partition string
+ Options []string
+ part *Partition
+}
+
+type ImagePartitionAction struct {
+ debos.BaseAction `yaml:",inline"`
+ ImageName string
+ ImageSize string
+ PartitionType string
+ GptGap string "gpt_gap"
+ Partitions []Partition
+ Mountpoints []Mountpoint
+ size int64
+ usingLoop bool
+}
+
+func (i *ImagePartitionAction) generateFSTab(context *debos.DebosContext) error {
+ context.ImageFSTab.Reset()
+
+ for _, m := range i.Mountpoints {
+ options := []string{"defaults"}
+ options = append(options, m.Options...)
+ if m.part.FSUUID == "" {
+ return fmt.Errorf("Missing fs UUID for partition %s!?!", m.part.Name)
+ }
+ context.ImageFSTab.WriteString(fmt.Sprintf("UUID=%s\t%s\t%s\t%s\t0\t0\n",
+ m.part.FSUUID, m.Mountpoint, m.part.FS,
+ strings.Join(options, ",")))
+ }
+
+ return nil
+}
+
+func (i *ImagePartitionAction) generateKernelRoot(context *debos.DebosContext) error {
+ for _, m := range i.Mountpoints {
+ if m.Mountpoint == "/" {
+ if m.part.FSUUID == "" {
+ return errors.New("No fs UUID for root partition !?!")
+ }
+ context.ImageKernelRoot = fmt.Sprintf("root=UUID=%s", m.part.FSUUID)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (i ImagePartitionAction) getPartitionDevice(number int, context debos.DebosContext) string {
+ suffix := "p"
+ /* Check partition naming first: if the 'by-id' naming convention is used */
+ if strings.Contains(context.Image, "/disk/by-id/") {
+ suffix = "-part"
+ }
+
+ /* If the image device has a digit as the last character, the partition
+ * suffix is p<number> else it's just <number> */
+ last := context.Image[len(context.Image)-1]
+ if last >= '0' && last <= '9' {
+ return fmt.Sprintf("%s%s%d", context.Image, suffix, number)
+ } else {
+ return fmt.Sprintf("%s%d", context.Image, number)
+ }
+}
+
+func (i ImagePartitionAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine,
+ args *[]string) error {
+ image, err := m.CreateImage(i.ImageName, i.size)
+ if err != nil {
+ return err
+ }
+
+ context.Image = image
+ *args = append(*args, "--internal-image", image)
+ return nil
+}
+
+func (i ImagePartitionAction) formatPartition(p *Partition, context debos.DebosContext) error {
+ label := fmt.Sprintf("Formatting partition %d", p.number)
+ path := i.getPartitionDevice(p.number, context)
+
+ cmdline := []string{}
+ switch p.FS {
+ case "vfat":
+ cmdline = append(cmdline, "mkfs.vfat", "-n", p.Name)
+ case "btrfs":
+ // Force formatting to prevent failure in case if partition was formatted already
+ cmdline = append(cmdline, "mkfs.btrfs", "-L", p.Name, "-f")
+ case "none":
+ default:
+ cmdline = append(cmdline, fmt.Sprintf("mkfs.%s", p.FS), "-L", p.Name)
+ }
+
+ if len(cmdline) != 0 {
+ cmdline = append(cmdline, path)
+
+ cmd := debos.Command{}
+ if err := cmd.Run(label, cmdline...); err != nil {
+ return err
+ }
+ }
+
+ if p.FS != "none" {
+ uuid, err := exec.Command("blkid", "-o", "value", "-s", "UUID", "-p", "-c", "none", path).Output()
+ if err != nil {
+ return fmt.Errorf("Failed to get uuid: %s", err)
+ }
+ p.FSUUID = strings.TrimSpace(string(uuid[:]))
+ }
+
+ return nil
+}
+
+func (i ImagePartitionAction) PreNoMachine(context *debos.DebosContext) error {
+
+ img, err := os.OpenFile(i.ImageName, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ return fmt.Errorf("Couldn't open image file: %v", err)
+ }
+
+ err = img.Truncate(i.size)
+ if err != nil {
+ return fmt.Errorf("Couldn't resize image file: %v", err)
+ }
+
+ img.Close()
+
+ loop, err := exec.Command("losetup", "-f", "--show", i.ImageName).Output()
+ if err != nil {
+ return fmt.Errorf("Failed to setup loop device")
+ }
+ context.Image = strings.TrimSpace(string(loop[:]))
+ i.usingLoop = true
+
+ return nil
+}
+
+func (i ImagePartitionAction) Run(context *debos.DebosContext) error {
+ i.LogStart()
+
+ command := []string{"parted", "-s", context.Image, "mklabel", i.PartitionType}
+ if len(i.GptGap) > 0 {
+ command = append(command, i.GptGap)
+ }
+ err := debos.Command{}.Run("parted", command...)
+ if err != nil {
+ return err
+ }
+ for idx, _ := range i.Partitions {
+ p := &i.Partitions[idx]
+ var name string
+ if i.PartitionType == "gpt" {
+ name = p.Name
+ } else {
+ name = "primary"
+ }
+
+ command := []string{"parted", "-a", "none", "-s", "--", context.Image, "mkpart", name}
+ switch p.FS {
+ case "vfat":
+ command = append(command, "fat32")
+ case "none":
+ default:
+ command = append(command, p.FS)
+ }
+ command = append(command, p.Start, p.End)
+
+ err = debos.Command{}.Run("parted", command...)
+ if err != nil {
+ return err
+ }
+
+ if p.Flags != nil {
+ for _, flag := range p.Flags {
+ err = debos.Command{}.Run("parted", "parted", "-s", context.Image, "set",
+ fmt.Sprintf("%d", p.number), flag, "on")
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ devicePath := i.getPartitionDevice(p.number, *context)
+ // Give a chance for udevd to create proper symlinks
+ err = debos.Command{}.Run("udevadm", "udevadm", "settle", "-t", "5",
+ "-E", devicePath)
+ if err != nil {
+ return err
+ }
+
+ err = i.formatPartition(p, *context)
+ if err != nil {
+ return err
+ }
+
+ context.ImagePartitions = append(context.ImagePartitions,
+ debos.Partition{p.Name, devicePath})
+ }
+
+ context.ImageMntDir = path.Join(context.Scratchdir, "mnt")
+ os.MkdirAll(context.ImageMntDir, 0755)
+ for _, m := range i.Mountpoints {
+ dev := i.getPartitionDevice(m.part.number, *context)
+ mntpath := path.Join(context.ImageMntDir, m.Mountpoint)
+ os.MkdirAll(mntpath, 0755)
+ err := syscall.Mount(dev, mntpath, m.part.FS, 0, "")
+ if err != nil {
+ return fmt.Errorf("%s mount failed: %v", m.part.Name, err)
+ }
+ }
+
+ err = i.generateFSTab(context)
+ if err != nil {
+ return err
+ }
+
+ err = i.generateKernelRoot(context)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (i ImagePartitionAction) Cleanup(context debos.DebosContext) error {
+ for idx := len(i.Mountpoints) - 1; idx >= 0; idx-- {
+ m := i.Mountpoints[idx]
+ mntpath := path.Join(context.ImageMntDir, m.Mountpoint)
+ syscall.Unmount(mntpath, 0)
+ }
+
+ if i.usingLoop {
+ exec.Command("losetup", "-d", context.Image).Run()
+ }
+
+ return nil
+}
+
+func (i *ImagePartitionAction) Verify(context *debos.DebosContext) error {
+ if len(i.GptGap) > 0 {
+ log.Println("WARNING: special version of parted is needed for 'gpt_gap' option")
+ if i.PartitionType != "gpt" {
+ return fmt.Errorf("gpt_gap property could be used only with 'gpt' label")
+ }
+ // Just check if it contains correct value
+ _, err := units.FromHumanSize(i.GptGap)
+ if err != nil {
+ return fmt.Errorf("Failed to parse GPT offset: %s", i.GptGap)
+ }
+ }
+
+ num := 1
+ for idx, _ := range i.Partitions {
+ p := &i.Partitions[idx]
+ p.number = num
+ num++
+ if p.Name == "" {
+ return fmt.Errorf("Partition without a name")
+ }
+ if p.Start == "" {
+ return fmt.Errorf("Partition %s missing start", p.Name)
+ }
+ if p.End == "" {
+ return fmt.Errorf("Partition %s missing end", p.Name)
+ }
+
+ switch p.FS {
+ case "fat32":
+ p.FS = "vfat"
+ case "":
+ return fmt.Errorf("Partition %s missing fs type", p.Name)
+ }
+ }
+
+ for idx, _ := range i.Mountpoints {
+ m := &i.Mountpoints[idx]
+ for pidx, _ := range i.Partitions {
+ p := &i.Partitions[pidx]
+ if m.Partition == p.Name {
+ m.part = p
+ break
+ }
+ }
+ if m.part == nil {
+ return fmt.Errorf("Couldn't fount partition for %s", m.Mountpoint)
+ }
+ }
+
+ size, err := units.FromHumanSize(i.ImageSize)
+ if err != nil {
+ return fmt.Errorf("Failed to parse image size: %s", i.ImageSize)
+ }
+
+ i.size = size
+ return nil
+}
diff --git a/actions/ostree_commit_action.go b/actions/ostree_commit_action.go
new file mode 100644
index 0000000..6d41b89
--- /dev/null
+++ b/actions/ostree_commit_action.go
@@ -0,0 +1,84 @@
+/*
+OstreeCommit Action
+
+Create OSTree commit from rootfs.
+
+Yaml syntax:
+ - action: ostree-commit
+ repository: repository name
+ branch: branch name
+ subject: commit message
+
+Mandatory properties:
+
+- repository -- path to repository with OSTree structure; the same path is
+used by 'ostree' tool with '--repo' argument.
+This path is relative to 'artifact' directory.
+Please keep in mind -- you will need root privileges for the 'bare' repository
+type (https://ostree.readthedocs.io/en/latest/manual/repo/#repository-types-and-locations).
+
+- branch -- OSTree branch name that should be used for the commit.
+
+Optional properties:
+
+- subject -- one line message with commit description.
+*/
+package actions
+
+import (
+ "log"
+ "os"
+ "path"
+
+ "github.com/go-debos/debos"
+ "github.com/sjoerdsimons/ostree-go/pkg/otbuiltin"
+)
+
+type OstreeCommitAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Repository string
+ Branch string
+ Subject string
+ Command string
+}
+
+func emptyDir(dir string) {
+ d, _ := os.Open(dir)
+ defer d.Close()
+ files, _ := d.Readdirnames(-1)
+ for _, f := range files {
+ os.RemoveAll(path.Join(dir, f))
+ }
+}
+
+func (ot *OstreeCommitAction) Run(context *debos.DebosContext) error {
+ ot.LogStart()
+ repoPath := path.Join(context.Artifactdir, ot.Repository)
+
+ emptyDir(path.Join(context.Rootdir, "dev"))
+
+ repo, err := otbuiltin.OpenRepo(repoPath)
+ if err != nil {
+ return err
+ }
+
+ _, err = repo.PrepareTransaction()
+ if err != nil {
+ return err
+ }
+
+ opts := otbuiltin.NewCommitOptions()
+ opts.Subject = ot.Subject
+ ret, err := repo.Commit(context.Rootdir, ot.Branch, opts)
+ if err != nil {
+ return err
+ } else {
+ log.Printf("Commit: %s\n", ret)
+ }
+ _, err = repo.CommitTransaction()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/actions/ostree_deploy_action.go b/actions/ostree_deploy_action.go
new file mode 100644
index 0000000..b2d373d
--- /dev/null
+++ b/actions/ostree_deploy_action.go
@@ -0,0 +1,183 @@
+/*
+OstreeDeploy Action
+
+Deploy the OSTree branch to the image.
+If any preparation has been done for rootfs, it can be overwritten
+during this step.
+
+Action 'image-partition' must be called prior to OSTree deploy.
+
+Yaml syntax:
+ - action: ostree-deploy
+ repository: repository name
+ remote_repository: URL
+ branch: branch name
+ os: os name
+ setup-fstab: bool
+ setup-kernel-cmdline: bool
+ append-kernel-cmdline: arguments
+
+Mandatory properties:
+
+- remote_repository -- URL to remote OSTree repository for pulling stateroot branch.
+Currently not implemented, please prepare local repository instead.
+
+- repository -- path to repository with OSTree structure.
+This path is relative to 'artifact' directory.
+
+- os -- os deployment name, as explained in:
+https://ostree.readthedocs.io/en/latest/manual/deployment/
+
+- branch -- branch of the repository to use for populating the image.
+
+Optional properties:
+
+- setup-fstab -- create '/etc/fstab' file for image
+
+- setup-kernel-cmdline -- add the information from the 'image-partition'
+action to the configured commandline.
+
+- append-kernel-cmdline -- additional kernel command line arguments passed to kernel.
+*/
+package actions
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/go-debos/debos"
+ ostree "github.com/sjoerdsimons/ostree-go/pkg/otbuiltin"
+)
+
+type OstreeDeployAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Repository string
+ RemoteRepository string "remote_repository"
+ Branch string
+ Os string
+ SetupFSTab bool `yaml:"setup-fstab"`
+ SetupKernelCmdline bool `yaml:"setup-kernel-cmdline"`
+ AppendKernelCmdline string `yaml:"append-kernel-cmdline"`
+}
+
+func NewOstreeDeployAction() *OstreeDeployAction {
+ ot := &OstreeDeployAction{SetupFSTab: true, SetupKernelCmdline: true}
+ ot.Description = "Deploying from ostree"
+ return ot
+}
+
+func (ot *OstreeDeployAction) setupFSTab(deployment *ostree.Deployment, context *debos.DebosContext) error {
+ deploymentDir := fmt.Sprintf("ostree/deploy/%s/deploy/%s.%d",
+ deployment.Osname(), deployment.Csum(), deployment.Deployserial())
+
+ etcDir := path.Join(context.Rootdir, deploymentDir, "etc")
+
+ err := os.Mkdir(etcDir, 0755)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ dst, err := os.OpenFile(path.Join(etcDir, "fstab"), os.O_WRONLY|os.O_CREATE, 0755)
+ defer dst.Close()
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(dst, &context.ImageFSTab)
+
+ return err
+}
+
+func (ot *OstreeDeployAction) Run(context *debos.DebosContext) error {
+ ot.LogStart()
+
+ // This is to handle cases where we didn't partition an image
+ if len(context.ImageMntDir) != 0 {
+ /* First deploy the current rootdir to the image so it can seed e.g.
+ * bootloader configuration */
+ err := debos.Command{}.Run("Deploy to image", "cp", "-a", context.Rootdir+"/.", context.ImageMntDir)
+ if err != nil {
+ return fmt.Errorf("rootfs deploy failed: %v", err)
+ }
+ context.Rootdir = context.ImageMntDir
+ }
+
+ repoPath := "file://" + path.Join(context.Artifactdir, ot.Repository)
+
+ sysroot := ostree.NewSysroot(context.Rootdir)
+ err := sysroot.InitializeFS()
+ if err != nil {
+ return err
+ }
+
+ err = sysroot.InitOsname(ot.Os, nil)
+ if err != nil {
+ return err
+ }
+
+ /* HACK: Getting the repository from the sysroot gets ostree confused on
+ * whether it should configure /etc/ostree or the repo configuration,
+ so reopen by hand */
+ /* dstRepo, err := sysroot.Repo(nil) */
+ dstRepo, err := ostree.OpenRepo(path.Join(context.Rootdir, "ostree/repo"))
+ if err != nil {
+ return err
+ }
+
+ /* FIXME: add support for gpg signing commits so this is no longer needed */
+ opts := ostree.RemoteOptions{NoGpgVerify: true}
+ err = dstRepo.RemoteAdd("origin", ot.RemoteRepository, opts, nil)
+ if err != nil {
+ return err
+ }
+
+ var options ostree.PullOptions
+ options.OverrideRemoteName = "origin"
+ options.Refs = []string{ot.Branch}
+
+ err = dstRepo.PullWithOptions(repoPath, options, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ /* Required by ostree to make sure a bunch of information was pulled in */
+ sysroot.Load(nil)
+
+ revision, err := dstRepo.ResolveRev(ot.Branch, false)
+ if err != nil {
+ return err
+ }
+
+ var kargs []string
+ if ot.SetupKernelCmdline {
+ kargs = append(kargs, context.ImageKernelRoot)
+ }
+
+ if ot.AppendKernelCmdline != "" {
+ s := strings.Split(ot.AppendKernelCmdline, " ")
+ kargs = append(kargs, s...)
+ }
+
+ origin := sysroot.OriginNewFromRefspec("origin:" + ot.Branch)
+ deployment, err := sysroot.DeployTree(ot.Os, revision, origin, nil, kargs, nil)
+ if err != nil {
+ return err
+ }
+
+ if ot.SetupFSTab {
+ err = ot.setupFSTab(deployment, context)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = sysroot.SimpleWriteDeployment(ot.Os, deployment, nil, 0, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/actions/overlay_action.go b/actions/overlay_action.go
new file mode 100644
index 0000000..f17ecb3
--- /dev/null
+++ b/actions/overlay_action.go
@@ -0,0 +1,67 @@
+/*
+Overlay Action
+
+Recursive copy of directory or file to target filesystem.
+
+Yaml syntax:
+ - action: overlay
+ origin: name
+ source: directory
+ destination: directory
+
+Mandatory properties:
+
+- source -- relative path to the directory or file located in the path referenced by 'origin'.
+If this property is absent, the path referenced by 'origin' will be used as-is.
+
+Optional properties:
+
+- origin -- reference to named file or directory.
+
+- destination -- absolute path in the target rootfs where 'source' will be copied.
+All existing files will be overwritten.
+If destination isn't set, '/' of the rootfs will be used.
+*/
+package actions
+
+import (
+ "fmt"
+ "path"
+
+ "github.com/go-debos/debos"
+)
+
+type OverlayAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Origin string // origin of overlay, here the export from other action may be used
+ Source string // external path where the overlay is
+ Destination string // path inside of rootfs
+}
+
+func (overlay *OverlayAction) Verify(context *debos.DebosContext) error {
+ if _, err := debos.RestrictedPath(context.Rootdir, overlay.Destination); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (overlay *OverlayAction) Run(context *debos.DebosContext) error {
+ overlay.LogStart()
+ origin := context.RecipeDir
+
+ //Trying to get a filename from exports first
+ if len(overlay.Origin) > 0 {
+ var found bool
+ if origin, found = context.Origins[overlay.Origin]; !found {
+ return fmt.Errorf("Origin not found '%s'", overlay.Origin)
+ }
+ }
+
+ sourcedir := path.Join(origin, overlay.Source)
+ destination, err := debos.RestrictedPath(context.Rootdir, overlay.Destination)
+ if err != nil {
+ return err
+ }
+
+ return debos.CopyTree(sourcedir, destination)
+}
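Both the overlay action above and the unpack action later in this commit call debos.RestrictedPath to keep user-supplied paths inside the target root. Its actual implementation lives in filesystem.go, which is not included in this section; purely as an illustration of the idea (a sketch, not the debos code), a minimal standalone version of such a check could look like this:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // restrictedPath is an illustrative sketch, not the debos implementation:
    // it joins dest onto root and rejects any result that escapes root via "..".
    func restrictedPath(root, dest string) (string, error) {
        full := filepath.Join(root, dest) // Join also cleans the path
        if full != root && !strings.HasPrefix(full, root+string(filepath.Separator)) {
            return "", fmt.Errorf("path %q escapes %q", dest, root)
        }
        return full, nil
    }

    func main() {
        fmt.Println(restrictedPath("/scratch/root", "etc/motd"))         // accepted
        fmt.Println(restrictedPath("/scratch/root", "../../etc/passwd")) // rejected
    }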
diff --git a/actions/pack_action.go b/actions/pack_action.go
new file mode 100644
index 0000000..a90cb1d
--- /dev/null
+++ b/actions/pack_action.go
@@ -0,0 +1,39 @@
+/*
+Pack Action
+
+Create a tarball of the root filesystem.
+
+Yaml syntax:
+ - action: pack
+ file: filename.ext
+ compression: gz
+
+Mandatory properties:
+
+- file -- name of the output tarball.
+
+- compression -- compression type to use. Only 'gz' is supported at the moment.
+
+*/
+package actions
+
+import (
+ "log"
+ "path"
+
+ "github.com/go-debos/debos"
+)
+
+type PackAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Compression string
+ File string
+}
+
+func (pf *PackAction) Run(context *debos.DebosContext) error {
+ pf.LogStart()
+ outfile := path.Join(context.Artifactdir, pf.File)
+
+ log.Printf("Compression to %s\n", outfile)
+ return debos.Command{}.Run("Packing", "tar", "czf", outfile, "-C", context.Rootdir, ".")
+}
diff --git a/actions/raw_action.go b/actions/raw_action.go
new file mode 100644
index 0000000..d3e66fe
--- /dev/null
+++ b/actions/raw_action.go
@@ -0,0 +1,130 @@
+/*
+Raw Action
+
+Directly write a file to the output image at a given offset.
+This is typically useful for bootloaders.
+
+Yaml syntax:
+ - action: raw
+ origin: name
+ source: filename
+ offset: bytes
+
+Mandatory properties:
+
+- origin -- reference to named file or directory.
+
+- source -- the name of the file located in 'origin' to be written into the output image.
+
+Optional properties:
+
+- offset -- offset in bytes within the output image file.
+It is possible to use the internal templating mechanism of debos to calculate the
+offset in sectors (512 bytes) instead of bytes, for instance: '{{ sector 256 }}'.
+The default value is zero.
+
+- partition -- named partition to write to
+*/
+package actions
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/go-debos/debos"
+)
+
+type RawAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Origin string // where the source comes from
+ Offset string
+ Source string // relative path inside of origin
+ Path string // deprecated option (for backward compatibility)
+ Partition string // Partition to write otherwise full image
+}
+
+func (raw *RawAction) checkDeprecatedSyntax() error {
+
+ // New syntax is based on 'origin' and 'source'
+ // Check if we do not mix new and old syntax
+ // TODO: remove deprecated syntax verification
+ if len(raw.Path) > 0 {
+ // Deprecated syntax based on 'source' and 'path'
+ log.Printf("Usage of 'source' and 'path' properties is deprecated.")
+ log.Printf("Please use 'origin' and 'source' properties.")
+ if len(raw.Origin) > 0 {
+ return errors.New("Can't mix 'origin' and 'path'(deprecated option) properties")
+ }
+ if len(raw.Source) == 0 {
+ return errors.New("'source' and 'path' properties can't be empty")
+ }
+ // Switch to new syntax
+ raw.Origin = raw.Source
+ raw.Source = raw.Path
+ raw.Path = ""
+ }
+ return nil
+}
+
+func (raw *RawAction) Verify(context *debos.DebosContext) error {
+ if err := raw.checkDeprecatedSyntax(); err != nil {
+ return err
+ }
+
+ if len(raw.Origin) == 0 || len(raw.Source) == 0 {
+ return errors.New("'origin' and 'source' properties can't be empty")
+ }
+
+ return nil
+}
+
+func (raw *RawAction) Run(context *debos.DebosContext) error {
+ raw.LogStart()
+ origin, found := context.Origins[raw.Origin]
+ if !found {
+ return fmt.Errorf("Origin `%s` doesn't exist\n", raw.Origin)
+ }
+ s := path.Join(origin, raw.Source)
+ content, err := ioutil.ReadFile(s)
+
+ if err != nil {
+ return fmt.Errorf("Failed to read %s", s)
+ }
+
+ var devicePath string
+ if raw.Partition != "" {
+ for _, p := range context.ImagePartitions {
+ if p.Name == raw.Partition {
+ devicePath = p.DevicePath
+ break
+ }
+ }
+
+ if devicePath == "" {
+ return fmt.Errorf("Failed to find partition named %s", raw.Partition)
+ }
+ } else {
+ devicePath = context.Image
+ }
+
+ target, err := os.OpenFile(devicePath, os.O_WRONLY, 0)
+ if err != nil {
+ return fmt.Errorf("Failed to open %s: %v", devicePath, err)
+ }
+
+ // The documented default offset is zero, so only parse it when set.
+ var offset int64
+ if len(raw.Offset) > 0 {
+ offset, err = strconv.ParseInt(raw.Offset, 0, 64)
+ if err != nil {
+ return fmt.Errorf("Couldn't parse offset %v", err)
+ }
+ }
+ bytes, err := target.WriteAt(content, offset)
+ if bytes != len(content) {
+ return errors.New("Couldn't write complete data")
+ }
+
+ return nil
+}
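The doc comment above mentions the '{{ sector 256 }}' template helper for expressing the offset in 512-byte sectors. The helper itself is defined elsewhere in the tree (likely the recipe templating code in recipe/recipe.go, which is not shown in this section), so the following is only a sketch of how such a text/template function can be defined, not the actual debos code:

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // Sketch of a "sector" helper: convert a count of 512-byte sectors
        // into the byte offset expected by the raw action's 'offset' property.
        funcs := template.FuncMap{
            "sector": func(n int) int { return n * 512 },
        }

        t := template.Must(template.New("offset").Funcs(funcs).Parse("offset: {{ sector 256 }}\n"))
        t.Execute(os.Stdout, nil) // prints "offset: 131072"
    }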
diff --git a/actions/run_action.go b/actions/run_action.go
new file mode 100644
index 0000000..ad374d8
--- /dev/null
+++ b/actions/run_action.go
@@ -0,0 +1,132 @@
+/*
+Run Action
+
+Allows running any available command or script in the target filesystem or
+in the host environment.
+
+Yaml syntax:
+ - action: run
+ chroot: bool
+ postprocess: bool
+ script: script name
+ command: command line
+
+Properties 'command' and 'script' are mutually exclusive.
+
+- command -- command with arguments; the command is expected to be accessible in
+the host's or the chrooted environment, depending on the 'chroot' property.
+
+- script -- script with arguments; the script must be located in the recipe directory.
+
+Optional properties:
+
+- chroot -- if set to true, run the script or command in the target filesystem.
+Otherwise the command or script is executed within the build process, with
+access to the filesystem and the image. In both cases it is run with root privileges.
+
+- postprocess -- if set, the script or command is executed after all other actions and
+has access to the image file.
+
+
+Properties 'chroot' and 'postprocess' are mutually exclusive.
+*/
+package actions
+
+import (
+ "errors"
+ "github.com/go-debos/fakemachine"
+ "path"
+ "strings"
+
+ "github.com/go-debos/debos"
+)
+
+type RunAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Chroot bool
+ PostProcess bool
+ Script string
+ Command string
+}
+
+func (run *RunAction) Verify(context *debos.DebosContext) error {
+ if run.PostProcess && run.Chroot {
+ return errors.New("Cannot run postprocessing in the chroot")
+ }
+ return nil
+}
+
+func (run *RunAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine,
+ args *[]string) error {
+
+ if run.Script == "" {
+ return nil
+ }
+
+ run.Script = debos.CleanPathAt(run.Script, context.RecipeDir)
+ // Expect we have no blank spaces in path
+ scriptpath := strings.Split(run.Script, " ")
+
+ if !run.PostProcess {
+ m.AddVolume(path.Dir(scriptpath[0]))
+ }
+
+ return nil
+}
+
+func (run *RunAction) doRun(context debos.DebosContext) error {
+ run.LogStart()
+ var cmdline []string
+ var label string
+ var cmd debos.Command
+
+ if run.Chroot {
+ cmd = debos.NewChrootCommandForContext(context)
+ } else {
+ cmd = debos.Command{}
+ }
+
+ if run.Script != "" {
+ script := strings.SplitN(run.Script, " ", 2)
+ script[0] = debos.CleanPathAt(script[0], context.RecipeDir)
+ if run.Chroot {
+ scriptpath := path.Dir(script[0])
+ cmd.AddBindMount(scriptpath, "/script")
+ script[0] = strings.Replace(script[0], scriptpath, "/script", 1)
+ }
+ cmdline = []string{strings.Join(script, " ")}
+ label = path.Base(run.Script)
+ } else {
+ cmdline = []string{run.Command}
+ label = run.Command
+ }
+
+ // Command/script with options passed as single string
+ cmdline = append([]string{"sh", "-c"}, cmdline...)
+
+ if !run.PostProcess {
+ if !run.Chroot {
+ cmd.AddEnvKey("ROOTDIR", context.Rootdir)
+ }
+ if context.Image != "" {
+ cmd.AddEnvKey("IMAGE", context.Image)
+ }
+ }
+
+ return cmd.Run(label, cmdline...)
+}
+
+func (run *RunAction) Run(context *debos.DebosContext) error {
+ if run.PostProcess {
+ /* This runs in postprocessing instead */
+ return nil
+ }
+ return run.doRun(*context)
+}
+
+func (run *RunAction) PostMachine(context debos.DebosContext) error {
+ if !run.PostProcess {
+ return nil
+ }
+ return run.doRun(context)
+}
diff --git a/actions/unpack_action.go b/actions/unpack_action.go
new file mode 100644
index 0000000..d4993b1
--- /dev/null
+++ b/actions/unpack_action.go
@@ -0,0 +1,100 @@
+/*
+Unpack Action
+
+Unpack files from an archive into the filesystem.
+Useful for creating the target rootfs from a saved tarball with a prepared file structure.
+
+Only (compressed) tar archives are supported currently.
+
+Yaml syntax:
+ - action: unpack
+ origin: name
+ file: file.ext
+ compression: gz
+
+Mandatory properties:
+
+- file -- the archive's file name. This property may be skipped if 'origin'
+references a downloaded file.
+
+One of these properties may be omitted, within the limitations mentioned above.
+If both properties are used, the archive named in 'file' is expected to be found inside 'origin'.
+
+Optional properties:
+
+- origin -- reference to a named file or directory.
+Defaults to the 'artifacts' directory if this property is omitted.
+
+- compression -- optional hint telling unpack which compression method to use.
+
+Currently only the 'gz', 'bzip2' and 'xz' compression types are supported.
+If not provided, the compression type is autodetected.
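+
+For illustration, unpacking a tarball previously fetched by a download action
+(the origin name is hypothetical):
+
+ - action: unpack
+ origin: downloaded-rootfs
+ compression: gz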
+*/
+package actions
+
+import (
+ "fmt"
+ "github.com/go-debos/debos"
+)
+
+type UnpackAction struct {
+ debos.BaseAction `yaml:",inline"`
+ Compression string
+ Origin string
+ File string
+}
+
+func (pf *UnpackAction) Verify(context *debos.DebosContext) error {
+
+ if len(pf.Origin) == 0 && len(pf.File) == 0 {
+ return fmt.Errorf("Filename can't be empty. Please add 'file' and/or 'origin' property.")
+ }
+
+ archive, err := debos.NewArchive(pf.File)
+ if err != nil {
+ return err
+ }
+ if len(pf.Compression) > 0 {
+ if archive.Type() != debos.Tar {
+ return fmt.Errorf("Option 'compression' is supported for Tar archives only.")
+ }
+ if err := archive.AddOption("tarcompression", pf.Compression); err != nil {
+ return fmt.Errorf("'%s': %s", pf.File, err)
+ }
+ }
+
+ return nil
+}
+
+func (pf *UnpackAction) Run(context *debos.DebosContext) error {
+ pf.LogStart()
+ var origin string
+
+ if len(pf.Origin) > 0 {
+ var found bool
+ //Trying to get a filename from origins first
+ origin, found = context.Origins[pf.Origin]
+ if !found {
+ return fmt.Errorf("Origin not found '%s'", pf.Origin)
+ }
+ } else {
+ origin = context.Artifactdir
+ }
+
+ infile, err := debos.RestrictedPath(origin, pf.File)
+ if err != nil {
+ return err
+ }
+
+ archive, err := debos.NewArchive(infile)
+ if err != nil {
+ return err
+ }
+ if len(pf.Compression) > 0 {
+ if err := archive.AddOption("tarcompression", pf.Compression); err != nil {
+ return err
+ }
+ }
+
+ return archive.Unpack(context.Rootdir)
+}
diff --git a/archiver.go b/archiver.go
new file mode 100644
index 0000000..e67fdf6
--- /dev/null
+++ b/archiver.go
@@ -0,0 +1,217 @@
+package debos
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+type ArchiveType int
+
+// Supported types
+const (
+ _ ArchiveType = iota // Guess archive type from file extension
+ Tar
+ Zip
+ Deb
+)
+
+type ArchiveBase struct {
+ file string // Path to archive file
+ atype ArchiveType
+ options map[interface{}]interface{} // Archiver-specific map with additional hints
+}
+type ArchiveTar struct {
+ ArchiveBase
+}
+type ArchiveZip struct {
+ ArchiveBase
+}
+type ArchiveDeb struct {
+ ArchiveBase
+}
+
+type Unpacker interface {
+ Unpack(destination string) error
+ RelaxedUnpack(destination string) error
+}
+
+type Archiver interface {
+ Type() ArchiveType
+ AddOption(key, value interface{}) error
+ Unpacker
+}
+
+type Archive struct {
+ Archiver
+}
+
+// Unpack archive as is
+func (arc *ArchiveBase) Unpack(destination string) error {
+ return fmt.Errorf("Unpack is not supported for '%s'", arc.file)
+}
+
+/*
+RelaxedUnpack unpacks the archive in relaxed mode, ignoring or working
+around minor issues with the unpacker tool or framework.
+*/
+func (arc *ArchiveBase) RelaxedUnpack(destination string) error {
+ return arc.Unpack(destination)
+}
+
+func (arc *ArchiveBase) AddOption(key, value interface{}) error {
+ if arc.options == nil {
+ arc.options = make(map[interface{}]interface{})
+ }
+ arc.options[key] = value
+ return nil
+}
+
+func (arc *ArchiveBase) Type() ArchiveType { return arc.atype }
+
+// Helper function for unpacking with external tool
+func unpack(command []string, destination string) error {
+ if err := os.MkdirAll(destination, 0755); err != nil {
+ return err
+ }
+ return Command{}.Run("unpack", command...)
+}
+
+// Helper function for checking allowed compression types
+// Returns empty string for unknown
+func tarOptions(compression string) string {
+ unpackTarOpts := map[string]string{
+ "gz": "-z",
+ "bzip2": "-j",
+ "xz": "-J",
+ } // Trying to guess all other supported compression types
+
+ return unpackTarOpts[compression]
+}
+
+func (tar *ArchiveTar) Unpack(destination string) error {
+ command := []string{"tar"}
+ if options, ok := tar.options["taroptions"].([]string); ok {
+ command = append(command, options...)
+ }
+ command = append(command, "-C", destination)
+ command = append(command, "-x")
+
+ if compression, ok := tar.options["tarcompression"]; ok {
+ if unpackTarOpt := tarOptions(compression.(string)); len(unpackTarOpt) > 0 {
+ command = append(command, unpackTarOpt)
+ }
+ }
+ command = append(command, "-f", tar.file)
+
+ return unpack(command, destination)
+}
+
+func (tar *ArchiveTar) RelaxedUnpack(destination string) error {
+
+ taroptions := []string{"--no-same-owner", "--no-same-permissions"}
+ options, ok := tar.options["taroptions"].([]string)
+ defer func() { tar.options["taroptions"] = options }()
+
+ if ok {
+ taroptions = append(taroptions, options...)
+ }
+ tar.options["taroptions"] = taroptions
+
+ return tar.Unpack(destination)
+}
+
+func (tar *ArchiveTar) AddOption(key, value interface{}) error {
+
+ switch key {
+ case "taroptions":
+ // expect a slice
+ options, ok := value.([]string)
+ if !ok {
+ return fmt.Errorf("Wrong type for value")
+ }
+ tar.options["taroptions"] = options
+
+ case "tarcompression":
+ compression, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("Wrong type for value")
+ }
+ option := tarOptions(compression)
+ if len(option) == 0 {
+ return fmt.Errorf("Compression '%s' is not supported", compression)
+ }
+ tar.options["tarcompression"] = compression
+
+ default:
+ return fmt.Errorf("Option '%v' is not supported for tar archive type", key)
+ }
+ return nil
+}
+
+func (zip *ArchiveZip) Unpack(destination string) error {
+ command := []string{"unzip", zip.file, "-d", destination}
+ return unpack(command, destination)
+}
+
+func (zip *ArchiveZip) RelaxedUnpack(destination string) error {
+ return zip.Unpack(destination)
+}
+
+func (deb *ArchiveDeb) Unpack(destination string) error {
+ command := []string{"dpkg", "-x", deb.file, destination}
+ return unpack(command, destination)
+}
+
+func (deb *ArchiveDeb) RelaxedUnpack(destination string) error {
+ return deb.Unpack(destination)
+}
+
+/*
+NewArchive associates the correct structure and methods with the given
+archive type. If ArchiveType is omitted, the type is guessed from the
+file extension. Returns an Archive, or an error for unsupported types.
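+
+Illustrative usage (the file name and destination are hypothetical):
+
+ archive, err := NewArchive("rootfs.tar.gz")
+ if err == nil {
+ err = archive.Unpack("/scratch/root")
+ }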
+*/
+func NewArchive(file string, arcType ...ArchiveType) (Archive, error) {
+ var archive Archive
+ var atype ArchiveType
+
+ if len(arcType) == 0 {
+ ext := filepath.Ext(file)
+ ext = strings.ToLower(ext)
+
+ switch ext {
+ case ".deb":
+ atype = Deb
+ case ".zip":
+ atype = Zip
+ default:
+ //FIXME: guess Tar maybe?
+ atype = Tar
+ }
+ } else {
+ atype = arcType[0]
+ }
+
+ common := ArchiveBase{}
+ common.file = file
+ common.atype = atype
+ common.options = make(map[interface{}]interface{})
+
+ switch atype {
+ case Tar:
+ archive = Archive{&ArchiveTar{ArchiveBase: common}}
+ case Zip:
+ archive = Archive{&ArchiveZip{ArchiveBase: common}}
+ case Deb:
+ archive = Archive{&ArchiveDeb{ArchiveBase: common}}
+ default:
+ return archive, fmt.Errorf("Unsupported archive '%s'", file)
+ }
+ return archive, nil
+}
diff --git a/archiver_test.go b/archiver_test.go
new file mode 100644
index 0000000..3023c12
--- /dev/null
+++ b/archiver_test.go
@@ -0,0 +1,157 @@
+package debos_test
+
+import (
+ _ "fmt"
+ "github.com/go-debos/debos"
+ "github.com/stretchr/testify/assert"
+ _ "reflect"
+ _ "strings"
+ "testing"
+)
+
+func TestBase(t *testing.T) {
+
+ // New archive
+ // Passing an unknown explicit type must fail
+ _, err := debos.NewArchive("test.base", 0)
+ assert.EqualError(t, err, "Unsupported archive 'test.base'")
+
+ // Test base
+ archive := debos.ArchiveBase{}
+ arcType := archive.Type()
+ assert.Equal(t, 0, int(arcType))
+
+ // Add option
+ err = archive.AddOption("someoption", "somevalue")
+ assert.Empty(t, err)
+
+ err = archive.Unpack("/tmp/test")
+ assert.EqualError(t, err, "Unpack is not supported for ''")
+ err = archive.RelaxedUnpack("/tmp/test")
+ assert.EqualError(t, err, "Unpack is not supported for ''")
+}
+
+func TestTar_default(t *testing.T) {
+
+ // New archive
+ // Expect Tar by default
+ archive, err := debos.NewArchive("test.tar.gz")
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+
+ // Type must be Tar by default
+ arcType := archive.Type()
+ assert.Equal(t, debos.Tar, arcType)
+
+ // Test unpack
+ err = archive.Unpack("/tmp/test")
+ // Expect unpack failure
+ assert.EqualError(t, err, "exit status 2")
+
+ // Expect failure for RelaxedUnpack
+ err = archive.RelaxedUnpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 2")
+
+ // Check options
+ err = archive.AddOption("taroptions", []string{"--option1"})
+ assert.Empty(t, err)
+ err = archive.Unpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 64")
+ err = archive.Unpack("/proc/debostest")
+ assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory")
+ err = archive.RelaxedUnpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 64")
+
+ // Add wrong option
+ err = archive.AddOption("someoption", "somevalue")
+ assert.EqualError(t, err, "Option 'someoption' is not supported for tar archive type")
+}
+
+// Check supported compression types
+func TestTar_compression(t *testing.T) {
+ compressions := map[string]string{
+ "gz": "tar -C test -x -z -f test.tar.gz",
+ "bzip2": "tar -C test -x -j -f test.tar.gz",
+ "xz": "tar -C test -x -J -f test.tar.gz",
+ }
+
+ // Force type
+ archive, err := debos.NewArchive("test.tar.gz", debos.Tar)
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+ // Type must be Tar
+ arcType := archive.Type()
+ assert.Equal(t, debos.Tar, arcType)
+
+ for compression := range compressions {
+ err = archive.AddOption("tarcompression", compression)
+ assert.Empty(t, err)
+ err := archive.Unpack("test")
+ assert.EqualError(t, err, "exit status 2")
+ }
+ // Check of unsupported compression type
+ err = archive.AddOption("tarcompression", "fake")
+ assert.EqualError(t, err, "Compression 'fake' is not supported")
+
+ // Pass incorrect type
+ err = archive.AddOption("taroptions", nil)
+ assert.EqualError(t, err, "Wrong type for value")
+ err = archive.AddOption("tarcompression", nil)
+ assert.EqualError(t, err, "Wrong type for value")
+}
+
+func TestDeb(t *testing.T) {
+
+ // Guess Deb
+ archive, err := debos.NewArchive("test.deb")
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+
+ // Type must be guessed as Deb
+ arcType := archive.Type()
+ assert.Equal(t, debos.Deb, arcType)
+
+ // Force Deb type
+ archive, err = debos.NewArchive("test.deb", debos.Deb)
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+
+ // Type must be Deb
+ arcType = archive.Type()
+ assert.Equal(t, debos.Deb, arcType)
+
+ // Expect unpack failure
+ err = archive.Unpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 2")
+ err = archive.Unpack("/proc/debostest")
+ assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory")
+ err = archive.RelaxedUnpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 2")
+}
+
+func TestZip(t *testing.T) {
+ // Guess zip
+ archive, err := debos.NewArchive("test.ZiP")
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+ // Type must be guessed as Zip
+ arcType := archive.Type()
+ assert.Equal(t, debos.Zip, arcType)
+
+ // Force Zip type
+ archive, err = debos.NewArchive("test.zip", debos.Zip)
+ assert.NotEmpty(t, archive)
+ assert.Empty(t, err)
+
+ // Type must be Zip
+ arcType = archive.Type()
+ assert.Equal(t, debos.Zip, arcType)
+
+ // Expect unpack failure
+ err = archive.Unpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 9")
+ err = archive.Unpack("/proc/debostest")
+ assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory")
+ err = archive.RelaxedUnpack("/tmp/test")
+ assert.EqualError(t, err, "exit status 9")
+}
diff --git a/cmd/debos/debos.go b/cmd/debos/debos.go
new file mode 100644
index 0000000..caff24b
--- /dev/null
+++ b/cmd/debos/debos.go
@@ -0,0 +1,219 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+
+ "github.com/docker/go-units"
+ "github.com/go-debos/debos"
+ "github.com/go-debos/debos/recipe"
+ "github.com/go-debos/fakemachine"
+ "github.com/jessevdk/go-flags"
+)
+
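+// checkError logs a failed action together with its stage, drops into the
+// debug shell if enabled, and returns a non-zero exit code.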
+func checkError(context debos.DebosContext, err error, a debos.Action, stage string) int {
+ if err == nil {
+ return 0
+ }
+
+ log.Printf("Action `%s` failed at stage %s, error: %s", a, stage, err)
+ debos.DebugShell(context)
+ return 1
+}
+
+func main() {
+ var context debos.DebosContext
+ var options struct {
+ ArtifactDir string `long:"artifactdir"`
+ InternalImage string `long:"internal-image" hidden:"true"`
+ TemplateVars map[string]string `short:"t" long:"template-var" description:"Template variables"`
+ DebugShell bool `long:"debug-shell" description:"Fall into interactive shell on error"`
+ Shell string `short:"s" long:"shell" description:"Redefine interactive shell binary (default: bash)" optional:"" default:"/bin/bash"`
+ ScratchSize string `long:"scratchsize" description:"Size of disk backed scratch space"`
+ }
+
+ var exitcode int = 0
+ // Allow to run all deferred calls prior to os.Exit()
+ defer func() {
+ os.Exit(exitcode)
+ }()
+
+ parser := flags.NewParser(&options, flags.Default)
+ args, err := parser.Parse()
+
+ if err != nil {
+ flagsErr, ok := err.(*flags.Error)
+ if ok && flagsErr.Type == flags.ErrHelp {
+ return
+ } else {
+ fmt.Printf("%v\n", flagsErr)
+ exitcode = 1
+ return
+ }
+ }
+
+ if len(args) != 1 {
+ log.Println("No recipe given!")
+ exitcode = 1
+ return
+ }
+
+ // Set interactive shell binary only if '--debug-shell' options passed
+ if options.DebugShell {
+ context.DebugShell = options.Shell
+ }
+
+ file := args[0]
+ file = debos.CleanPath(file)
+
+ r := recipe.Recipe{}
+ if _, err := os.Stat(file); os.IsNotExist(err) {
+ log.Println(err)
+ exitcode = 1
+ return
+ }
+ if err := r.Parse(file, options.TemplateVars); err != nil {
+ log.Println(err)
+ exitcode = 1
+ return
+ }
+
+ /* If fakemachine is supported the outer fake machine will never use the
+ * scratchdir, so just set it to /scratch as a dummy to prevent the outer
+ * debos creating a temporary directory */
+ if fakemachine.InMachine() || fakemachine.Supported() {
+ context.Scratchdir = "/scratch"
+ } else {
+ log.Printf("fakemachine not supported, running on the host!")
+ cwd, _ := os.Getwd()
+ context.Scratchdir, err = ioutil.TempDir(cwd, ".debos-")
+ defer os.RemoveAll(context.Scratchdir)
+ }
+
+ context.Rootdir = path.Join(context.Scratchdir, "root")
+ context.Image = options.InternalImage
+ context.RecipeDir = path.Dir(file)
+
+ context.Artifactdir = options.ArtifactDir
+ if context.Artifactdir == "" {
+ context.Artifactdir, _ = os.Getwd()
+ }
+ context.Artifactdir = debos.CleanPath(context.Artifactdir)
+
+ // Initialise origins map
+ context.Origins = make(map[string]string)
+ context.Origins["artifacts"] = context.Artifactdir
+ context.Origins["filesystem"] = context.Rootdir
+ context.Origins["recipe"] = context.RecipeDir
+
+ context.Architecture = r.Architecture
+
+ for _, a := range r.Actions {
+ err = a.Verify(&context)
+ if exitcode = checkError(context, err, a, "Verify"); exitcode != 0 {
+ return
+ }
+ }
+
+ if !fakemachine.InMachine() && fakemachine.Supported() {
+ m := fakemachine.NewMachine()
+ var args []string
+
+ if options.ScratchSize != "" {
+ size, err := units.FromHumanSize(options.ScratchSize)
+ if err != nil {
+ fmt.Printf("Couldn't parse scratch size: %v\n", err)
+ exitcode = 1
+ return
+ }
+ m.SetScratch(size, "")
+ }
+
+ m.AddVolume(context.Artifactdir)
+ args = append(args, "--artifactdir", context.Artifactdir)
+
+ for k, v := range options.TemplateVars {
+ args = append(args, "--template-var", fmt.Sprintf("%s:\"%s\"", k, v))
+ }
+
+ m.AddVolume(context.RecipeDir)
+ args = append(args, file)
+
+ if options.DebugShell {
+ args = append(args, "--debug-shell")
+ args = append(args, "--shell", fmt.Sprintf("%s", options.Shell))
+ }
+
+ for _, a := range r.Actions {
+ err = a.PreMachine(&context, m, &args)
+ if exitcode = checkError(context, err, a, "PreMachine"); exitcode != 0 {
+ return
+ }
+ }
+
+ exitcode, err = m.RunInMachineWithArgs(args)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ if exitcode != 0 {
+ return
+ }
+
+ for _, a := range r.Actions {
+ err = a.PostMachine(context)
+ if exitcode = checkError(context, err, a, "Postmachine"); exitcode != 0 {
+ return
+ }
+ }
+
+ log.Printf("==== Recipe done ====")
+ return
+ }
+
+ if !fakemachine.InMachine() {
+ for _, a := range r.Actions {
+ err = a.PreNoMachine(&context)
+ if exitcode = checkError(context, err, a, "PreNoMachine"); exitcode != 0 {
+ return
+ }
+ }
+ }
+
+ // Create Rootdir
+ if _, err = os.Stat(context.Rootdir); os.IsNotExist(err) {
+ err = os.Mkdir(context.Rootdir, 0755)
+ if err != nil && os.IsNotExist(err) {
+ exitcode = 1
+ return
+ }
+ }
+
+ for _, a := range r.Actions {
+ err = a.Run(&context)
+ if exitcode = checkError(context, err, a, "Run"); exitcode != 0 {
+ return
+ }
+ }
+
+ for _, a := range r.Actions {
+ err = a.Cleanup(context)
+ if exitcode = checkError(context, err, a, "Cleanup"); exitcode != 0 {
+ return
+ }
+ }
+
+ if !fakemachine.InMachine() {
+ for _, a := range r.Actions {
+ err = a.PostMachine(context)
+ if exitcode = checkError(context, err, a, "PostMachine"); exitcode != 0 {
+ return
+ }
+ }
+ log.Printf("==== Recipe done ====")
+ }
+}
diff --git a/commands.go b/commands.go
new file mode 100644
index 0000000..557b636
--- /dev/null
+++ b/commands.go
@@ -0,0 +1,203 @@
+package debos
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+)
+
+type ChrootEnterMethod int
+
+const (
+ CHROOT_METHOD_NONE = iota // No chroot in use
+ CHROOT_METHOD_NSPAWN // Use systemd-nspawn to enter the chroot environment
+ CHROOT_METHOD_CHROOT // Use chroot to enter the chroot environment
+)
+
+type Command struct {
+ Architecture string // Architecture of the chroot, empty if same as host
+ Dir string // Working dir to run command in
+ Chroot string // Run in the chroot at path
+ ChrootMethod ChrootEnterMethod // Method to enter the chroot
+
+ bindMounts []string // Items to bind mount
+ extraEnv []string // Extra environment variables to set
+}
+
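+// commandWrapper buffers command output and logs it line by line,
+// prefixed with a label.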
+type commandWrapper struct {
+ label string
+ buffer *bytes.Buffer
+}
+
+func newCommandWrapper(label string) *commandWrapper {
+ b := bytes.Buffer{}
+ return &commandWrapper{label, &b}
+}
+
+func (w commandWrapper) out(atEOF bool) {
+ for {
+ s, err := w.buffer.ReadString('\n')
+ if err == nil {
+ log.Printf("%s | %v", w.label, s)
+ } else {
+ if len(s) > 0 {
+ if atEOF && err == io.EOF {
+ log.Printf("%s | %v\n", w.label, s)
+ } else {
+ w.buffer.WriteString(s)
+ }
+ }
+ break
+ }
+ }
+}
+
+func (w commandWrapper) Write(p []byte) (n int, err error) {
+ n, err = w.buffer.Write(p)
+ w.out(false)
+ return
+}
+
+func (w *commandWrapper) flush() {
+ w.out(true)
+}
+
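+// NewChrootCommandForContext returns a Command set up to run inside the
+// target rootfs, bind-mounting the image and its partitions when present.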
+func NewChrootCommandForContext(context DebosContext) Command {
+ c := Command{Architecture: context.Architecture, Chroot: context.Rootdir, ChrootMethod: CHROOT_METHOD_NSPAWN}
+
+ if context.Image != "" {
+ path, err := RealPath(context.Image)
+ if err == nil {
+ c.AddBindMount(path, "")
+ } else {
+ log.Printf("Failed to get realpath for %s, %v", context.Image, err)
+ }
+ for _, p := range context.ImagePartitions {
+ path, err := RealPath(p.DevicePath)
+ if err != nil {
+ log.Printf("Failed to get realpath for %s, %v", p.DevicePath, err)
+ continue
+ }
+ c.AddBindMount(path, "")
+ }
+ c.AddBindMount("/dev/disk", "")
+ }
+
+ return c
+}
+
+func (cmd *Command) AddEnv(env string) {
+ cmd.extraEnv = append(cmd.extraEnv, env)
+}
+
+func (cmd *Command) AddEnvKey(key, value string) {
+ cmd.extraEnv = append(cmd.extraEnv, fmt.Sprintf("%s=%s", key, value))
+}
+
+func (cmd *Command) AddBindMount(source, target string) {
+ var mount string
+ if target != "" {
+ mount = fmt.Sprintf("%s:%s", source, target)
+ } else {
+ mount = source
+ }
+
+ cmd.bindMounts = append(cmd.bindMounts, mount)
+}
+
+func (cmd Command) Run(label string, cmdline ...string) error {
+ q := newQemuHelper(cmd)
+ if err := q.Setup(); err != nil {
+ return err
+ }
+
+ var options []string
+ switch cmd.ChrootMethod {
+ case CHROOT_METHOD_NONE:
+ options = cmdline
+ case CHROOT_METHOD_CHROOT:
+ options = append(options, "chroot")
+ options = append(options, cmd.Chroot)
+ options = append(options, cmdline...)
+ case CHROOT_METHOD_NSPAWN:
+ options = append(options, "systemd-nspawn", "-q", "-D", cmd.Chroot)
+ for _, e := range cmd.extraEnv {
+ options = append(options, "--setenv", e)
+
+ }
+ for _, b := range cmd.bindMounts {
+ options = append(options, "--bind", b)
+
+ }
+ options = append(options, cmdline...)
+ }
+
+ exe := exec.Command(options[0], options[1:]...)
+ w := newCommandWrapper(label)
+
+ exe.Stdin = nil
+ exe.Stdout = w
+ exe.Stderr = w
+
+ if len(cmd.extraEnv) > 0 && cmd.ChrootMethod != CHROOT_METHOD_NSPAWN {
+ exe.Env = append(os.Environ(), cmd.extraEnv...)
+ }
+
+ // Disable services start/stop for commands running in chroot
+ if cmd.ChrootMethod != CHROOT_METHOD_NONE {
+ services := ServiceHelper{cmd.Chroot}
+ services.Deny()
+ defer services.Allow()
+ }
+
+ err := exe.Run()
+ w.flush()
+ q.Cleanup()
+
+ return err
+}
+
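+// qemuHelper copies the matching qemu-user-static binary into the chroot so
+// foreign-architecture binaries can be executed, and removes it on cleanup.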
+type qemuHelper struct {
+ qemusrc string
+ qemutarget string
+}
+
+func newQemuHelper(c Command) qemuHelper {
+ q := qemuHelper{}
+
+ if c.Chroot == "" || c.Architecture == "" {
+ return q
+ }
+
+ switch c.Architecture {
+ case "armhf", "armel", "arm":
+ q.qemusrc = "/usr/bin/qemu-arm-static"
+ case "arm64":
+ q.qemusrc = "/usr/bin/qemu-aarch64-static"
+ case "amd64", "i386":
+ /* Dummy, no qemu */
+ default:
+ log.Panicf("Don't know qemu for Architecture %s", c.Architecture)
+ }
+
+ if q.qemusrc != "" {
+ q.qemutarget = path.Join(c.Chroot, q.qemusrc)
+ }
+
+ return q
+}
+
+func (q qemuHelper) Setup() error {
+ if q.qemusrc == "" {
+ return nil
+ }
+ return CopyFile(q.qemusrc, q.qemutarget, 0755)
+}
+
+func (q qemuHelper) Cleanup() {
+ if q.qemusrc != "" {
+ os.Remove(q.qemutarget)
+ }
+}
diff --git a/commands_test.go b/commands_test.go
new file mode 100644
index 0000000..1fe4d46
--- /dev/null
+++ b/commands_test.go
@@ -0,0 +1,9 @@
+package debos
+
+import (
+ "testing"
+)
+
+func TestBasicCommand(t *testing.T) {
+ Command{}.Run("out", "ls", "-l")
+}
diff --git a/debug.go b/debug.go
new file mode 100644
index 0000000..1fd1356
--- /dev/null
+++ b/debug.go
@@ -0,0 +1,31 @@
+package debos
+
+import (
+ "fmt"
+ "log"
+ "os"
+)
+
+/*
+DebugShell launches an interactive shell for debugging
+and problem investigation.
+*/
+func DebugShell(context DebosContext) {
+
+ if len(context.DebugShell) == 0 {
+ return
+ }
+
+ pa := os.ProcAttr{
+ Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
+ Dir: context.Scratchdir,
+ }
+
+ // Start an interactive shell for debug.
+ log.Printf(">>> Starting a debug shell")
+ if proc, err := os.StartProcess(context.DebugShell, []string{}, &pa); err != nil {
+ fmt.Printf("Failed: %s\n", err)
+ } else {
+ proc.Wait()
+ }
+}
diff --git a/filesystem.go b/filesystem.go
new file mode 100644
index 0000000..a6e5239
--- /dev/null
+++ b/filesystem.go
@@ -0,0 +1,105 @@
+package debos
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
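+// CleanPathAt resolves path relative to at unless path is already absolute.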
+func CleanPathAt(path, at string) string {
+ if filepath.IsAbs(path) {
+ return filepath.Clean(path)
+ }
+
+ return filepath.Join(at, path)
+}
+
+func CleanPath(path string) string {
+ cwd, _ := os.Getwd()
+ return CleanPathAt(path, cwd)
+}
+
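+// CopyFile copies src to dst by writing to a temporary file first, applying
+// the requested mode and renaming it into place.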
+func CopyFile(src, dst string, mode os.FileMode) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+ tmp, err := ioutil.TempFile(filepath.Dir(dst), "")
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(tmp, in)
+ if err != nil {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ return err
+ }
+ if err = tmp.Close(); err != nil {
+ os.Remove(tmp.Name())
+ return err
+ }
+ if err = os.Chmod(tmp.Name(), mode); err != nil {
+ os.Remove(tmp.Name())
+ return err
+ }
+ return os.Rename(tmp.Name(), dst)
+}
+
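+// CopyTree overlays sourcetree onto desttree, copying regular files,
+// directories and symlinks.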
+func CopyTree(sourcetree, desttree string) error {
+ fmt.Printf("Overlaying %s on %s\n", sourcetree, desttree)
+ walker := func(p string, info os.FileInfo, err error) error {
+
+ if err != nil {
+ return err
+ }
+
+ suffix, _ := filepath.Rel(sourcetree, p)
+ target := path.Join(desttree, suffix)
+ switch info.Mode() & os.ModeType {
+ case 0:
+ CopyFile(p, target, info.Mode())
+ case os.ModeDir:
+ os.Mkdir(target, info.Mode())
+ case os.ModeSymlink:
+ link, err := os.Readlink(p)
+ if err != nil {
+ log.Panicf("Failed to read symlink %s: %v", suffix, err)
+ }
+ os.Symlink(link, target)
+ default:
+ log.Panicf("Not handled /%s %v", suffix, info.Mode())
+ }
+
+ return nil
+ }
+
+ return filepath.Walk(sourcetree, walker)
+}
+
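+// RealPath resolves symlinks in path and returns the absolute result.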
+func RealPath(path string) (string, error) {
+ p, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Abs(p)
+}
+
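+// RestrictedPath joins dest to prefix and fails if the result escapes prefix.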
+func RestrictedPath(prefix, dest string) (string, error) {
+ var err error
+ destination := path.Join(prefix, dest)
+ destination, err = filepath.Abs(destination)
+ if err != nil {
+ return "", err
+ }
+ if !strings.HasPrefix(destination, prefix) {
+ return "", fmt.Errorf("The resulting path points outside of prefix '%s': '%s'\n", prefix, destination)
+ }
+ return destination, nil
+}
diff --git a/net.go b/net.go
new file mode 100644
index 0000000..989c90d
--- /dev/null
+++ b/net.go
@@ -0,0 +1,45 @@
+package debos
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+)
+
+// Function for downloading single file object with http(s) protocol
+func DownloadHttpUrl(url, filename string) error {
+ log.Printf("Download started: '%s' -> '%s'\n", url, filename)
+
+ // TODO: Proxy support?
+
+ // Check if file object already exists.
+ fi, err := os.Stat(filename)
+ if err == nil && !fi.Mode().IsRegular() {
+ return fmt.Errorf("Failed to download '%s': '%s' exists and it is not a regular file\n", url, filename)
+ }
+
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("Url '%s' returned status code %d (%s)\n", url, resp.StatusCode, http.StatusText(resp.StatusCode))
+ }
+
+ // Output file
+ output, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer output.Close()
+
+ if _, err := io.Copy(output, resp.Body); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/os.go b/os.go
new file mode 100644
index 0000000..67abba1
--- /dev/null
+++ b/os.go
@@ -0,0 +1,74 @@
+package debos
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+const debianPolicyHelper = "/usr/sbin/policy-rc.d"
+
+/*
+ServiceHelper is used to manage services.
+Currently only the Debian-based family is supported.
+*/
+
+type ServiceHelper struct {
+ Rootdir string
+}
+
+type ServicesManager interface {
+ Allow() error
+ Deny() error
+}
+
+/*
+Allow permits starting and stopping services at the OS level.
+*/
+func (s *ServiceHelper) Allow() error {
+
+ helperFile := path.Join(s.Rootdir, debianPolicyHelper)
+
+ if _, err := os.Stat(helperFile); os.IsNotExist(err) {
+ return nil
+ }
+ if err := os.Remove(helperFile); err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+Deny prohibits starting and stopping services at the OS level.
+*/
+func (s *ServiceHelper) Deny() error {
+
+ helperFile := path.Join(s.Rootdir, debianPolicyHelper)
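+ // A policy-rc.d helper exiting with code 101 tells invoke-rc.d that starting/stopping services is denied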
+ var helper = []byte(`#!/bin/sh
+
+exit 101
+`)
+
+ if _, err := os.Stat(helperFile); err == nil {
+ return fmt.Errorf("Policy helper file '%s' exists already", debianPolicyHelper)
+ }
+ if _, err := os.Stat(path.Dir(helperFile)); os.IsNotExist(err) {
+ // nothing to do if ".../usr/sbin" does not exist
+ return nil
+ }
+ pf, err := os.Create(helperFile)
+ if err != nil {
+ return err
+ }
+ defer pf.Close()
+
+ if _, err := pf.Write(helper); err != nil {
+ return err
+ }
+
+ if err := pf.Chmod(0755); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/recipe/recipe.go b/recipe/recipe.go
new file mode 100644
index 0000000..49ff5a8
--- /dev/null
+++ b/recipe/recipe.go
@@ -0,0 +1,175 @@
+/*
+Package 'recipe' implements actions mapping to YAML recipe.
+
+Recipe syntax
+
+A recipe is a YAML file which is pre-processed through the Go
+text templating engine (https://golang.org/pkg/text/template).
+
+A recipe is composed of two parts:
+
+- header
+
+- actions
+
+Comments are allowed and should be prefixed with '#' symbol.
+
+ # Declare variable 'Var'
+ {{- $Var := "Value" -}}
+
+ # Header
+ architecture: arm64
+
+ # Actions are executed in listed order
+ actions:
+ - action: ActionName1
+ property1: true
+
+ - action: ActionName2
+ # Use value of variable 'Var' defined above
+ property2: {{$Var}}
+
+Mandatory properties for a recipe:
+
+- architecture -- target architecture
+
+- actions -- at least one action should be listed
+
+Supported actions
+
+- apt -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Apt_Action
+
+- debootstrap -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Debootstrap_Action
+
+- download -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Download_Action
+
+- filesystem-deploy -- https://godoc.org/github.com/go-debos/debos/actions#hdr-FilesystemDeploy_Action
+
+- image-partition -- https://godoc.org/github.com/go-debos/debos/actions#hdr-ImagePartition_Action
+
+- ostree-commit -- https://godoc.org/github.com/go-debos/debos/actions#hdr-OstreeCommit_Action
+
+- ostree-deploy -- https://godoc.org/github.com/go-debos/debos/actions#hdr-OstreeDeploy_Action
+
+- overlay -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Overlay_Action
+
+- pack -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Pack_Action
+
+- raw -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Raw_Action
+
+- run -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Run_Action
+
+- unpack -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Unpack_Action
+*/
+package recipe
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/go-debos/debos"
+ "github.com/go-debos/debos/actions"
+ "gopkg.in/yaml.v2"
+ "path"
+ "text/template"
+)
+
+/* YamlAction embeds the Action interface and implements the
+ * UnmarshalYAML function so it can select the concrete implementation of a
+ * specific action at unmarshalling time */
+type YamlAction struct {
+ debos.Action
+}
+
+type Recipe struct {
+ Architecture string
+ Actions []YamlAction
+}
+
+func (y *YamlAction) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var aux debos.BaseAction
+
+ err := unmarshal(&aux)
+ if err != nil {
+ return err
+ }
+
+ switch aux.Action {
+ case "debootstrap":
+ y.Action = actions.NewDebootstrapAction()
+ case "pack":
+ y.Action = &actions.PackAction{}
+ case "unpack":
+ y.Action = &actions.UnpackAction{}
+ case "run":
+ y.Action = &actions.RunAction{}
+ case "apt":
+ y.Action = &actions.AptAction{}
+ case "ostree-commit":
+ y.Action = &actions.OstreeCommitAction{}
+ case "ostree-deploy":
+ y.Action = actions.NewOstreeDeployAction()
+ case "overlay":
+ y.Action = &actions.OverlayAction{}
+ case "image-partition":
+ y.Action = &actions.ImagePartitionAction{}
+ case "filesystem-deploy":
+ y.Action = actions.NewFilesystemDeployAction()
+ case "raw":
+ y.Action = &actions.RawAction{}
+ case "download":
+ y.Action = &actions.DownloadAction{}
+ default:
+ return fmt.Errorf("Unknown action: %v", aux.Action)
+ }
+
+ return unmarshal(y.Action)
+}
+
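+// sector is exposed to the recipe templating engine as the 'sector' function;
+// it converts a count of 512-byte sectors to bytes, e.g. {{ sector 64 }} yields 32768.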
+func sector(s int) int {
+ return s * 512
+}
+
+/*
+Parse reads the YAML recipe file and maps all steps to the appropriate actions.
+
+- file -- path to the recipe file
+
+- templateVars -- optional custom variable map for the templating engine.
+Only the first map is used; any additional maps are ignored.
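+
+Illustrative usage (the file name and variable are hypothetical):
+
+ r := recipe.Recipe{}
+ err := r.Parse("recipe.yaml", map[string]string{"suite": "stretch"})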
+*/
+func (r *Recipe) Parse(file string, templateVars ...map[string]string) error {
+ t := template.New(path.Base(file))
+ funcs := template.FuncMap{
+ "sector": sector,
+ }
+ t.Funcs(funcs)
+
+ if _, err := t.ParseFiles(file); err != nil {
+ return err
+ }
+
+ if len(templateVars) == 0 {
+ templateVars = append(templateVars, make(map[string]string))
+ }
+
+ data := new(bytes.Buffer)
+ if err := t.Execute(data, templateVars[0]); err != nil {
+ return err
+ }
+
+ if err := yaml.Unmarshal(data.Bytes(), &r); err != nil {
+ return err
+ }
+
+ if len(r.Architecture) == 0 {
+ return fmt.Errorf("Recipe file must have 'architecture' property")
+ }
+
+ if len(r.Actions) == 0 {
+ return fmt.Errorf("Recipe file must have at least one action")
+ }
+
+ return nil
+}
diff --git a/recipe/recipe_test.go b/recipe/recipe_test.go
new file mode 100644
index 0000000..01457e9
--- /dev/null
+++ b/recipe/recipe_test.go
@@ -0,0 +1,182 @@
+package recipe_test
+
+import (
+ "github.com/go-debos/debos/recipe"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+type testRecipe struct {
+ recipe string
+ err string
+}
+
+// Test if incorrect file has been passed
+func TestParse_incorrect_file(t *testing.T) {
+ var err error
+
+ var tests = []struct {
+ filename string
+ err string
+ }{
+ {
+ "non-existing.yaml",
+ "open non-existing.yaml: no such file or directory",
+ },
+ {
+ "/proc",
+ "read /proc: is a directory",
+ },
+ }
+
+ for _, test := range tests {
+ r := recipe.Recipe{}
+ err = r.Parse(test.filename)
+ assert.EqualError(t, err, test.err)
+ }
+}
+
+// Check common recipe syntax
+func TestParse_syntax(t *testing.T) {
+
+ var tests = []testRecipe{
+ // Test if all actions are supported
+ {`
+architecture: arm64
+
+actions:
+ - action: apt
+ - action: debootstrap
+ - action: download
+ - action: filesystem-deploy
+ - action: image-partition
+ - action: ostree-commit
+ - action: ostree-deploy
+ - action: overlay
+ - action: pack
+ - action: raw
+ - action: run
+ - action: unpack
+`,
+ "", // Do not expect failure
+ },
+ // Test of unknown action in list
+ {`
+architecture: arm64
+
+actions:
+ - action: test_unknown_action
+`,
+ "Unknown action: test_unknown_action",
+ },
+ // Test if 'architecture' property absence
+ {`
+actions:
+ - action: raw
+`,
+ "Recipe file must have 'architecture' property",
+ },
+ // Test if no actions listed
+ {`
+architecture: arm64
+`,
+ "Recipe file must have at least one action",
+ },
+ // Test of wrong syntax in Yaml
+ {`wrong`,
+ "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `wrong` into recipe.Recipe",
+ },
+ }
+
+ for _, test := range tests {
+ runTest(t, test)
+ }
+}
+
+// Check template engine
+func TestParse_template(t *testing.T) {
+
+ var test = testRecipe{
+ // Test template variables
+ `
+{{ $action:= or .action "download" }}
+architecture: arm64
+actions:
+ - action: {{ $action }}
+`,
+ "", // Do not expect failure
+ }
+
+ { // Test of embedded template
+ r := runTest(t, test)
+ assert.Equalf(t, r.Actions[0].String(), "download",
+ "Fail to use embedded variable definition from recipe:%s\n",
+ test.recipe)
+ }
+
+ { // Test of user-defined template variable
+ var templateVars = map[string]string{
+ "action": "pack",
+ }
+
+ r := runTest(t, test, templateVars)
+ assert.Equalf(t, r.Actions[0].String(), "pack",
+ "Fail to redefine variable with user-defined map:%s\n",
+ test.recipe)
+ }
+}
+
+// Test of 'sector' function embedded to recipe package
+func TestParse_sector(t *testing.T) {
+ var testSector = testRecipe{
+ // Fail with unknown action
+ `
+architecture: arm64
+
+actions:
+ - action: {{ sector 42 }}
+`,
+ "Unknown action: 21504",
+ }
+ runTest(t, testSector)
+}
+
+func runTest(t *testing.T, test testRecipe, templateVars ...map[string]string) recipe.Recipe {
+ file, err := ioutil.TempFile(os.TempDir(), "recipe")
+ assert.Empty(t, err)
+ defer os.Remove(file.Name())
+
+ file.WriteString(test.recipe)
+ file.Close()
+
+ r := recipe.Recipe{}
+ if len(templateVars) == 0 {
+ err = r.Parse(file.Name())
+ } else {
+ err = r.Parse(file.Name(), templateVars[0])
+ }
+
+ failed := false
+
+ if len(test.err) > 0 {
+ // Expected error?
+ failed = !assert.EqualError(t, err, test.err)
+ } else {
+ // Unexpected error
+ failed = !assert.Empty(t, err)
+ }
+
+ if failed {
+ t.Logf("Failed recipe:%s\n", test.recipe)
+ }
+
+ return r
+}