Diffstat (limited to 'mkosi')
-rwxr-xr-x  mkosi  2904
1 files changed, 2904 insertions, 0 deletions
diff --git a/mkosi b/mkosi
new file mode 100755
index 0000000..cc09c8f
--- /dev/null
+++ b/mkosi
@@ -0,0 +1,2904 @@
+#!/usr/bin/python3
+# PYTHON_ARGCOMPLETE_OK
+
+import argparse
+import configparser
+import contextlib
+import ctypes, ctypes.util
+import crypt
+import getpass
+import hashlib
+import os
+import platform
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+import urllib.request
+import uuid
+
+try:
+ import argcomplete
+except ImportError:
+ pass
+
+from enum import Enum
+
+__version__ = '3'
+
+if sys.version_info < (3, 5):
+ sys.exit("Sorry, we need at least Python 3.5.")
+
+# TODO
+# - volatile images
+# - make ubuntu images bootable
+# - work on device nodes
+# - allow passing env vars
+
+def die(message, status=1):
+ assert status >= 1 and status < 128
+ sys.stderr.write(message + "\n")
+ sys.exit(status)
+
+class OutputFormat(Enum):
+ raw_gpt = 1
+ raw_btrfs = 2
+ raw_squashfs = 3
+ directory = 4
+ subvolume = 5
+ tar = 6
+
+class Distribution(Enum):
+ fedora = 1
+ debian = 2
+ ubuntu = 3
+ arch = 4
+ opensuse = 5
+ mageia = 6
+
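+# GPT partition type UUIDs, as defined by the Discoverable Partitions
+# Specification. Tagging partitions with these types lets systemd
+# discover and mount them automatically, without an /etc/fstab entry.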
+GPT_ROOT_X86 = uuid.UUID("44479540f29741b29af7d131d5f0458a")
+GPT_ROOT_X86_64 = uuid.UUID("4f68bce3e8cd4db196e7fbcaf984b709")
+GPT_ROOT_ARM = uuid.UUID("69dad7102ce44e3cb16c21a1d49abed3")
+GPT_ROOT_ARM_64 = uuid.UUID("b921b0451df041c3af444c6f280d3fae")
+GPT_ROOT_IA64 = uuid.UUID("993d8d3df80e4225855a9daf8ed7ea97")
+GPT_ESP = uuid.UUID("c12a7328f81f11d2ba4b00a0c93ec93b")
+GPT_SWAP = uuid.UUID("0657fd6da4ab43c484e50933c84b4f4f")
+GPT_HOME = uuid.UUID("933ac7e12eb44f13b8440e14e2aef915")
+GPT_SRV = uuid.UUID("3b8f842520e04f3b907f1a25a76f98e8")
+GPT_ROOT_X86_VERITY = uuid.UUID("d13c5d3bb5d1422ab29f9454fdc89d76")
+GPT_ROOT_X86_64_VERITY = uuid.UUID("2c7357edebd246d9aec123d437ec2bf5")
+GPT_ROOT_ARM_VERITY = uuid.UUID("7386cdf2203c47a9a498f2ecce45a2d6")
+GPT_ROOT_ARM_64_VERITY = uuid.UUID("df3300ced69f4c92978c9bfb0f38d820")
+GPT_ROOT_IA64_VERITY = uuid.UUID("86ed10d5b60745bb8957d350f23d0571")
+
+if platform.machine() == "x86_64":
+ GPT_ROOT_NATIVE = GPT_ROOT_X86_64
+ GPT_ROOT_NATIVE_VERITY = GPT_ROOT_X86_64_VERITY
+elif platform.machine() == "aarch64":
+ GPT_ROOT_NATIVE = GPT_ROOT_ARM_64
+ GPT_ROOT_NATIVE_VERITY = GPT_ROOT_ARM_64_VERITY
+else:
+ die("Don't know the %s architecture." % platform.machine())
+
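+# Flag for unshare(2) that detaches the process into a new mount
+# namespace, taken from <sched.h>.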
+CLONE_NEWNS = 0x00020000
+
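+# Map from Fedora release to the ID of its RPM GPG key, used to fetch
+# the key from getfedora.org when it is not installed locally.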
+FEDORA_KEYS_MAP = {
+ "23": "34EC9CBA",
+ "24": "81B46521",
+ "25": "FDB19C98",
+ "26": "64DAB85D",
+}
+
+# 1 MB at the beginning of the disk for the GPT disk label, and
+# another MB at the end (this is actually more than needed.)
+GPT_HEADER_SIZE = 1024*1024
+GPT_FOOTER_SIZE = 1024*1024
+
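+# The Python standard library has no wrapper for unshare(2), so call
+# straight into libc via ctypes.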
+def unshare(flags):
+ libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
+
+ if libc.unshare(ctypes.c_int(flags)) != 0:
+ e = ctypes.get_errno()
+ raise OSError(e, os.strerror(e))
+
+def format_bytes(bytes):
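+    """Format a byte count as a human-readable string, e.g. format_bytes(3221225472) == "3.0G"."""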
+ if bytes >= 1024*1024*1024:
+ return "{:0.1f}G".format(bytes / 1024**3)
+ if bytes >= 1024*1024:
+ return "{:0.1f}M".format(bytes / 1024**2)
+ if bytes >= 1024:
+ return "{:0.1f}K".format(bytes / 1024)
+
+ return "{}B".format(bytes)
+
+def roundup512(x):
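+    # Round up to a multiple of the 512-byte sector size, e.g. roundup512(513) == 1024.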
+ return (x + 511) & ~511
+
+def print_step(text):
+ sys.stderr.write("‣ \033[0;1;39m" + text + "\033[0m\n")
+
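+# complete_step() brackets a step with "..."/"complete" log lines and can
+# be used both as a context manager and as a decorator. Values appended
+# to the yielded list are substituted into the completion message, e.g.:
+#
+#     with complete_step('Attaching image file', 'Attached image file as {}') as output:
+#         output.append(loopdev)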
+@contextlib.contextmanager
+def complete_step(text, text2=None):
+ print_step(text + '...')
+ args = []
+ yield args
+ if text2 is None:
+ text2 = text + ' complete'
+ print_step(text2.format(*args) + '.')
+
+@complete_step('Detaching namespace')
+def init_namespace(args):
+ args.original_umask = os.umask(0o000)
+ unshare(CLONE_NEWNS)
+ subprocess.run(["mount", "--make-rslave", "/"], check=True)
+
+def setup_workspace(args):
+ print_step("Setting up temporary workspace.")
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume):
+ d = tempfile.TemporaryDirectory(dir=os.path.dirname(args.output), prefix='.mkosi-')
+ else:
+ d = tempfile.TemporaryDirectory(dir='/var/tmp', prefix='mkosi-')
+
+ print_step("Temporary workspace in " + d.name + " is now set up.")
+ return d
+
+def btrfs_subvol_create(path, mode=0o755):
+ m = os.umask(~mode & 0o7777)
+ subprocess.run(["btrfs", "subvol", "create", path], check=True)
+ os.umask(m)
+
+def btrfs_subvol_delete(path):
+ subprocess.run(["btrfs", "subvol", "delete", path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
+
+def btrfs_subvol_make_ro(path, b=True):
+ subprocess.run(["btrfs", "property", "set", path, "ro", "true" if b else "false"], check=True)
+
+def image_size(args):
+ size = GPT_HEADER_SIZE + GPT_FOOTER_SIZE
+
+ if args.root_size is not None:
+ size += args.root_size
+ if args.home_size is not None:
+ size += args.home_size
+ if args.srv_size is not None:
+ size += args.srv_size
+ if args.bootable:
+ size += args.esp_size
+ if args.swap_size is not None:
+ size += args.swap_size
+ if args.verity_size is not None:
+ size += args.verity_size
+
+ return size
+
+def disable_cow(path):
+ """Disable copy-on-write if applicable on filesystem"""
+
+ subprocess.run(["chattr", "+C", path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False)
+
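+# determine_partition_table() builds its output in sfdisk's script
+# format: one "key=value, ..." line per partition, with sizes given in
+# 512-byte sectors, e.g. (values here are illustrative):
+#
+#     size=524288, type=c12a7328-f81f-11d2-ba4b-00a0c93ec93b, name="ESP System Partition"
+#
+# A line without "size=" (like the root partition below) takes up all
+# remaining space.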
+def determine_partition_table(args):
+
+ pn = 1
+ table = "label: gpt\n"
+ run_sfdisk = False
+
+ if args.bootable:
+ table += 'size={}, type={}, name="ESP System Partition"\n'.format(args.esp_size // 512, GPT_ESP)
+ args.esp_partno = pn
+ pn += 1
+ run_sfdisk = True
+ else:
+ args.esp_partno = None
+
+ if args.swap_size is not None:
+ table += 'size={}, type={}, name="Swap Partition"\n'.format(args.swap_size // 512, GPT_SWAP)
+ args.swap_partno = pn
+ pn += 1
+ run_sfdisk = True
+ else:
+ args.swap_partno = None
+
+ args.home_partno = None
+ args.srv_partno = None
+
+ if args.output_format != OutputFormat.raw_btrfs:
+ if args.home_size is not None:
+ table += 'size={}, type={}, name="Home Partition"\n'.format(args.home_size // 512, GPT_HOME)
+ args.home_partno = pn
+ pn += 1
+ run_sfdisk = True
+
+ if args.srv_size is not None:
+ table += 'size={}, type={}, name="Server Data Partition"\n'.format(args.srv_size // 512, GPT_SRV)
+ args.srv_partno = pn
+ pn += 1
+ run_sfdisk = True
+
+ if args.output_format != OutputFormat.raw_squashfs:
+ table += 'type={}, attrs={}, name="Root Partition"\n'.format(GPT_ROOT_NATIVE, "GUID:60" if args.read_only and args.output_format != OutputFormat.raw_btrfs else "")
+ run_sfdisk = True
+
+ args.root_partno = pn
+ pn += 1
+
+ if args.verity:
+ args.verity_partno = pn
+ pn += 1
+ else:
+ args.verity_partno = None
+
+ return table, run_sfdisk
+
+
+def create_image(args, workspace, for_cache):
+ if args.output_format not in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs):
+ return None
+
+ with complete_step('Creating partition table',
+ 'Created partition table as {.name}') as output:
+
+ f = tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix='.mkosi-', delete=not for_cache)
+ output.append(f)
+ disable_cow(f.name)
+ f.truncate(image_size(args))
+
+ table, run_sfdisk = determine_partition_table(args)
+
+ if run_sfdisk:
+ subprocess.run(["sfdisk", "--color=never", f.name], input=table.encode("utf-8"), check=True)
+ subprocess.run(["sync"])
+
+ args.ran_sfdisk = run_sfdisk
+
+ return f
+
+def reuse_cache_image(args, workspace, run_build_script, for_cache):
+
+ if not args.incremental:
+ return None, False
+ if for_cache:
+ return None, False
+ if args.output_format not in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs):
+ return None, False
+
+ fname = args.cache_pre_dev if run_build_script else args.cache_pre_inst
+ if fname is None:
+ return None, False
+
+ with complete_step('Basing off cached image ' + fname,
+ 'Copied cached image as {.name}') as output:
+
+ try:
+ source = open(fname, "rb")
+ except FileNotFoundError:
+ return None, False
+
+ with source:
+            f = tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix='.mkosi-')
+ output.append(f)
+ disable_cow(f.name)
+ shutil.copyfileobj(source, f)
+
+ table, run_sfdisk = determine_partition_table(args)
+ args.ran_sfdisk = run_sfdisk
+
+ return f, True
+
+@contextlib.contextmanager
+def attach_image_loopback(args, raw):
+ if raw is None:
+ yield None
+ return
+
+ with complete_step('Attaching image file',
+ 'Attached image file as {}') as output:
+ c = subprocess.run(["losetup", "--find", "--show", "--partscan", raw.name],
+ stdout=subprocess.PIPE, check=True)
+ loopdev = c.stdout.decode("utf-8").strip()
+ output.append(loopdev)
+
+ try:
+ yield loopdev
+ finally:
+ with complete_step('Detaching image file'):
+ subprocess.run(["losetup", "--detach", loopdev], check=True)
+
+def partition(loopdev, partno):
+ if partno is None:
+ return None
+
+ return loopdev + "p" + str(partno)
+
+def prepare_swap(args, loopdev, cached):
+ if loopdev is None:
+ return
+ if cached:
+ return
+ if args.swap_partno is None:
+ return
+
+ with complete_step('Formatting swap partition'):
+ subprocess.run(["mkswap", "-Lswap", partition(loopdev, args.swap_partno)],
+ check=True)
+
+def prepare_esp(args, loopdev, cached):
+ if loopdev is None:
+ return
+ if cached:
+ return
+ if args.esp_partno is None:
+ return
+
+ with complete_step('Formatting ESP partition'):
+ subprocess.run(["mkfs.fat", "-nEFI", "-F32", partition(loopdev, args.esp_partno)],
+ check=True)
+
+def mkfs_ext4(label, mount, dev):
+ subprocess.run(["mkfs.ext4", "-L", label, "-M", mount, dev], check=True)
+
+def mkfs_btrfs(label, dev):
+ subprocess.run(["mkfs.btrfs", "-L", label, "-d", "single", "-m", "single", dev], check=True)
+
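+# The passphrase argument used by the LUKS helpers below is a dict:
+# {'type': 'stdin', 'content': <the passphrase itself>} or
+# {'type': 'file', 'content': <path to a key file>}.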
+def luks_format(dev, passphrase):
+
+ if passphrase['type'] == 'stdin':
+ passphrase = (passphrase['content'] + "\n").encode("utf-8")
+ subprocess.run(["cryptsetup", "luksFormat", "--batch-mode", dev], input=passphrase, check=True)
+ else:
+ assert passphrase['type'] == 'file'
+ subprocess.run(["cryptsetup", "luksFormat", "--batch-mode", dev, passphrase['content']], check=True)
+
+def luks_open(dev, passphrase):
+
+ name = str(uuid.uuid4())
+
+ if passphrase['type'] == 'stdin':
+ passphrase = (passphrase['content'] + "\n").encode("utf-8")
+ subprocess.run(["cryptsetup", "open", "--type", "luks", dev, name], input=passphrase, check=True)
+ else:
+ assert passphrase['type'] == 'file'
+ subprocess.run(["cryptsetup", "--key-file", passphrase['content'], "open", "--type", "luks", dev, name], check=True)
+
+ return os.path.join("/dev/mapper", name)
+
+def luks_close(dev, text):
+ if dev is None:
+ return
+
+ with complete_step(text):
+ subprocess.run(["cryptsetup", "close", dev], check=True)
+
+def luks_format_root(args, loopdev, run_build_script, cached, inserting_squashfs=False):
+
+ if args.encrypt != "all":
+ return
+ if args.root_partno is None:
+ return
+ if args.output_format == OutputFormat.raw_squashfs and not inserting_squashfs:
+ return
+ if run_build_script:
+ return
+ if cached:
+ return
+
+ with complete_step("LUKS formatting root partition"):
+ luks_format(partition(loopdev, args.root_partno), args.passphrase)
+
+def luks_format_home(args, loopdev, run_build_script, cached):
+
+ if args.encrypt is None:
+ return
+ if args.home_partno is None:
+ return
+ if run_build_script:
+ return
+ if cached:
+ return
+
+ with complete_step("LUKS formatting home partition"):
+ luks_format(partition(loopdev, args.home_partno), args.passphrase)
+
+def luks_format_srv(args, loopdev, run_build_script, cached):
+
+ if args.encrypt is None:
+ return
+ if args.srv_partno is None:
+ return
+ if run_build_script:
+ return
+ if cached:
+ return
+
+ with complete_step("LUKS formatting server data partition"):
+ luks_format(partition(loopdev, args.srv_partno), args.passphrase)
+
+def luks_setup_root(args, loopdev, run_build_script, inserting_squashfs=False):
+
+ if args.encrypt != "all":
+ return None
+ if args.root_partno is None:
+ return None
+ if args.output_format == OutputFormat.raw_squashfs and not inserting_squashfs:
+ return None
+ if run_build_script:
+ return None
+
+ with complete_step("Opening LUKS root partition"):
+ return luks_open(partition(loopdev, args.root_partno), args.passphrase)
+
+def luks_setup_home(args, loopdev, run_build_script):
+
+ if args.encrypt is None:
+ return None
+ if args.home_partno is None:
+ return None
+ if run_build_script:
+ return None
+
+ with complete_step("Opening LUKS home partition"):
+ return luks_open(partition(loopdev, args.home_partno), args.passphrase)
+
+def luks_setup_srv(args, loopdev, run_build_script):
+
+ if args.encrypt is None:
+ return None
+ if args.srv_partno is None:
+ return None
+ if run_build_script:
+ return None
+
+ with complete_step("Opening LUKS server data partition"):
+ return luks_open(partition(loopdev, args.srv_partno), args.passphrase)
+
+@contextlib.contextmanager
+def luks_setup_all(args, loopdev, run_build_script):
+
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume, OutputFormat.tar):
+ yield (None, None, None)
+ return
+
+ try:
+ root = luks_setup_root(args, loopdev, run_build_script)
+ try:
+ home = luks_setup_home(args, loopdev, run_build_script)
+ try:
+ srv = luks_setup_srv(args, loopdev, run_build_script)
+
+ yield (partition(loopdev, args.root_partno) if root is None else root, \
+ partition(loopdev, args.home_partno) if home is None else home, \
+ partition(loopdev, args.srv_partno) if srv is None else srv)
+ finally:
+ luks_close(srv, "Closing LUKS server data partition")
+ finally:
+ luks_close(home, "Closing LUKS home partition")
+ finally:
+ luks_close(root, "Closing LUKS root partition")
+
+def prepare_root(args, dev, cached):
+ if dev is None:
+ return
+ if args.output_format == OutputFormat.raw_squashfs:
+ return
+ if cached:
+ return
+
+ with complete_step('Formatting root partition'):
+ if args.output_format == OutputFormat.raw_btrfs:
+ mkfs_btrfs("root", dev)
+ else:
+ mkfs_ext4("root", "/", dev)
+
+def prepare_home(args, dev, cached):
+ if dev is None:
+ return
+ if cached:
+ return
+
+ with complete_step('Formatting home partition'):
+ mkfs_ext4("home", "/home", dev)
+
+def prepare_srv(args, dev, cached):
+ if dev is None:
+ return
+ if cached:
+ return
+
+ with complete_step('Formatting server data partition'):
+ mkfs_ext4("srv", "/srv", dev)
+
+def mount_loop(args, dev, where, read_only=False):
+    os.makedirs(where, 0o755, exist_ok=True)
+
+ options = "-odiscard"
+
+ if args.compress and args.output_format == OutputFormat.raw_btrfs:
+ options += ",compress"
+
+ if read_only:
+ options += ",ro"
+
+ subprocess.run(["mount", "-n", dev, where, options], check=True)
+
+def mount_bind(what, where):
+    os.makedirs(where, 0o755, exist_ok=True)
+ subprocess.run(["mount", "--bind", what, where], check=True)
+
+def mount_tmpfs(where):
+    os.makedirs(where, 0o755, exist_ok=True)
+ subprocess.run(["mount", "tmpfs", "-t", "tmpfs", where], check=True)
+
+@contextlib.contextmanager
+def mount_image(args, workspace, loopdev, root_dev, home_dev, srv_dev, root_read_only=False):
+ if loopdev is None:
+ yield None
+ return
+
+ with complete_step('Mounting image'):
+ root = os.path.join(workspace, "root")
+
+ if args.output_format != OutputFormat.raw_squashfs:
+ mount_loop(args, root_dev, root, root_read_only)
+
+ if home_dev is not None:
+ mount_loop(args, home_dev, os.path.join(root, "home"))
+
+ if srv_dev is not None:
+ mount_loop(args, srv_dev, os.path.join(root, "srv"))
+
+ if args.esp_partno is not None:
+ mount_loop(args, partition(loopdev, args.esp_partno), os.path.join(root, "efi"))
+
+ # Make sure /tmp and /run are not part of the image
+ mount_tmpfs(os.path.join(root, "run"))
+ mount_tmpfs(os.path.join(root, "tmp"))
+
+ try:
+ yield
+ finally:
+ with complete_step('Unmounting image'):
+
+ for d in ("home", "srv", "efi", "run", "tmp"):
+ umount(os.path.join(root, d))
+
+ umount(root)
+
+@complete_step("Assigning hostname")
+def assign_hostname(args, workspace):
+ root = os.path.join(workspace, "root")
+ hostname_path = os.path.join(root, "etc/hostname")
+
+    if os.path.islink(hostname_path) or os.path.isfile(hostname_path):
+        os.remove(hostname_path)
+
+    if args.hostname:
+        with open(hostname_path, "w") as f:
+            f.write("{}\n".format(args.hostname))
+
+@contextlib.contextmanager
+def mount_api_vfs(args, workspace):
+ paths = ('/proc', '/dev', '/sys')
+ root = os.path.join(workspace, "root")
+
+ with complete_step('Mounting API VFS'):
+ for d in paths:
+ mount_bind(d, root + d)
+ try:
+ yield
+ finally:
+ with complete_step('Unmounting API VFS'):
+ for d in paths:
+ umount(root + d)
+
+@contextlib.contextmanager
+def mount_cache(args, workspace):
+
+ if args.cache_path is None:
+ yield
+ return
+
+ # We can't do this in mount_image() yet, as /var itself might have to be created as a subvolume first
+ with complete_step('Mounting Package Cache'):
+ if args.distribution in (Distribution.fedora, Distribution.mageia):
+ mount_bind(args.cache_path, os.path.join(workspace, "root", "var/cache/dnf"))
+ elif args.distribution in (Distribution.debian, Distribution.ubuntu):
+ mount_bind(args.cache_path, os.path.join(workspace, "root", "var/cache/apt/archives"))
+ elif args.distribution == Distribution.arch:
+ mount_bind(args.cache_path, os.path.join(workspace, "root", "var/cache/pacman/pkg"))
+ elif args.distribution == Distribution.opensuse:
+ mount_bind(args.cache_path, os.path.join(workspace, "root", "var/cache/zypp/packages"))
+ try:
+ yield
+ finally:
+ with complete_step('Unmounting Package Cache'):
+ for d in ("var/cache/dnf", "var/cache/apt/archives", "var/cache/pacman/pkg", "var/cache/zypp/packages"):
+ umount(os.path.join(workspace, "root", d))
+
+def umount(where):
+ # Ignore failures and error messages
+ subprocess.run(["umount", "-n", where], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+@complete_step('Setting up basic OS tree')
+def prepare_tree(args, workspace, run_build_script, cached):
+
+ if args.output_format == OutputFormat.subvolume:
+ btrfs_subvol_create(os.path.join(workspace, "root"))
+ else:
+ try:
+ os.mkdir(os.path.join(workspace, "root"))
+ except FileExistsError:
+ pass
+
+ if args.output_format in (OutputFormat.subvolume, OutputFormat.raw_btrfs):
+
+ if cached and args.output_format is OutputFormat.raw_btrfs:
+ return
+
+ btrfs_subvol_create(os.path.join(workspace, "root", "home"))
+ btrfs_subvol_create(os.path.join(workspace, "root", "srv"))
+ btrfs_subvol_create(os.path.join(workspace, "root", "var"))
+ btrfs_subvol_create(os.path.join(workspace, "root", "var/tmp"), 0o1777)
+ os.mkdir(os.path.join(workspace, "root", "var/lib"))
+ btrfs_subvol_create(os.path.join(workspace, "root", "var/lib/machines"), 0o700)
+
+ if cached:
+ return
+
+ if args.bootable:
+ # We need an initialized machine ID for the boot logic to work
+ os.mkdir(os.path.join(workspace, "root", "etc"), 0o755)
+        with open(os.path.join(workspace, "root", "etc/machine-id"), "w") as f:
+            f.write(args.machine_id + "\n")
+
+ os.mkdir(os.path.join(workspace, "root", "efi/EFI"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi/EFI/BOOT"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi/EFI/Linux"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi/EFI/systemd"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi/loader"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi/loader/entries"), 0o700)
+ os.mkdir(os.path.join(workspace, "root", "efi", args.machine_id), 0o700)
+
+ os.mkdir(os.path.join(workspace, "root", "boot"), 0o700)
+ os.symlink("../efi", os.path.join(workspace, "root", "boot/efi"))
+ os.symlink("efi/loader", os.path.join(workspace, "root", "boot/loader"))
+ os.symlink("efi/" + args.machine_id, os.path.join(workspace, "root", "boot", args.machine_id))
+
+ os.mkdir(os.path.join(workspace, "root", "etc/kernel"), 0o755)
+
+ with open(os.path.join(workspace, "root", "etc/kernel/cmdline"), "w") as cmdline:
+ cmdline.write(args.kernel_commandline)
+ cmdline.write("\n")
+
+ if run_build_script:
+ os.mkdir(os.path.join(workspace, "root", "root"), 0o750)
+ os.mkdir(os.path.join(workspace, "root", "root/dest"), 0o755)
+
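+# Rewrite a file line by line through line_rewriter(), preserving the
+# original permissions and timestamps via copystat().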
+def patch_file(filepath, line_rewriter):
+ temp_new_filepath = filepath + ".tmp.new"
+
+ with open(filepath, "r") as old:
+ with open(temp_new_filepath, "w") as new:
+ for line in old:
+ new.write(line_rewriter(line))
+
+ shutil.copystat(filepath, temp_new_filepath)
+ os.remove(filepath)
+ shutil.move(temp_new_filepath, filepath)
+
+def fix_hosts_line_in_nsswitch(line):
+ if line.startswith("hosts:"):
+ sources = line.split(" ")
+ if 'resolve' not in sources:
+ return " ".join(["resolve" if w == "dns" else w for w in sources])
+ return line
+
+def enable_networkd(workspace):
+ subprocess.run(["systemctl",
+ "--root", os.path.join(workspace, "root"),
+ "enable", "systemd-networkd", "systemd-resolved"],
+ check=True)
+
+ os.remove(os.path.join(workspace, "root", "etc/resolv.conf"))
+ os.symlink("../usr/lib/systemd/resolv.conf", os.path.join(workspace, "root", "etc/resolv.conf"))
+
+ patch_file(os.path.join(workspace, "root", "etc/nsswitch.conf"), fix_hosts_line_in_nsswitch)
+
+ with open(os.path.join(workspace, "root", "etc/systemd/network/all-ethernet.network"), "w") as f:
+ f.write("""\
+[Match]
+Type=ether
+
+[Network]
+DHCP=yes
+""")
+
+def run_workspace_command(args, workspace, *cmd, network=False, env={}):
+
+ cmdline = ["systemd-nspawn",
+ '--quiet',
+ "--directory=" + os.path.join(workspace, "root"),
+ "--uuid=" + args.machine_id,
+ "--machine=mkosi-" + uuid.uuid4().hex,
+ "--as-pid2",
+ "--register=no",
+ "--bind=" + var_tmp(workspace) + ":/var/tmp" ]
+
+ if not network:
+ cmdline += ["--private-network"]
+
+ cmdline += [ "--setenv={}={}".format(k,v) for k,v in env.items() ]
+
+ cmdline += ['--', *cmd]
+ subprocess.run(cmdline, check=True)
+
+def check_if_url_exists(url):
+    req = urllib.request.Request(url, method="HEAD")
+    try:
+        return urllib.request.urlopen(req) is not None
+    except Exception:
+        return False
+
+def disable_kernel_install(args, workspace):
+
+ # Let's disable the automatic kernel installation done by the
+    # kernel RPMs. After all, we want to build our own unified kernels
+ # that include the root hash in the kernel command line and can be
+ # signed as a single EFI executable. Since the root hash is only
+ # known when the root file system is finalized we turn off any
+ # kernel installation beforehand.
+
+ if not args.bootable:
+ return
+
+ for d in ("etc", "etc/kernel", "etc/kernel/install.d"):
+ try:
+ os.mkdir(os.path.join(workspace, "root", d), 0o755)
+ except FileExistsError:
+ pass
+
+ for f in ("50-dracut.install", "51-dracut-rescue.install", "90-loaderentry.install"):
+ os.symlink("/dev/null", os.path.join(workspace, "root", "etc/kernel/install.d", f))
+
+def invoke_dnf(args, workspace, run_build_script, repositories, base_packages, boot_packages):
+
+ repos = ["--enablerepo=" + repo for repo in repositories]
+
+ root = os.path.join(workspace, "root")
+ cmdline = ["dnf",
+ "-y",
+ "--config=" + os.path.join(workspace, "dnf.conf"),
+ "--best",
+ "--allowerasing",
+ "--releasever=" + args.release,
+ "--installroot=" + root,
+ "--disablerepo=*",
+ *repos,
+ "--setopt=keepcache=1",
+ "--setopt=install_weak_deps=0"]
+
+ # Turn off docs, but not during the development build, as dnf currently has problems with that
+ if not args.with_docs and not run_build_script:
+ cmdline.append("--setopt=tsflags=nodocs")
+
+ cmdline.extend([
+ "install",
+ *base_packages
+ ])
+
+ if args.packages is not None:
+ cmdline.extend(args.packages)
+
+ if run_build_script and args.build_packages is not None:
+ cmdline.extend(args.build_packages)
+
+ if args.bootable:
+ cmdline.extend(boot_packages)
+
+    # Temporary hack: dracut only adds crypto support to the initrd if the cryptsetup binary is installed
+ if args.encrypt or args.verity:
+ cmdline.append("cryptsetup")
+
+ if args.output_format == OutputFormat.raw_gpt:
+ cmdline.append("e2fsprogs")
+
+ if args.output_format == OutputFormat.raw_btrfs:
+ cmdline.append("btrfs-progs")
+
+ with mount_api_vfs(args, workspace):
+ subprocess.run(cmdline, check=True)
+
+@complete_step('Installing Fedora')
+def install_fedora(args, workspace, run_build_script):
+
+ disable_kernel_install(args, workspace)
+
+ gpg_key = "/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-%s-x86_64" % args.release
+ if os.path.exists(gpg_key):
+ gpg_key = "file://%s" % gpg_key
+ else:
+ gpg_key = "https://getfedora.org/static/%s.txt" % FEDORA_KEYS_MAP[args.release]
+
+ if args.mirror:
+ baseurl = "{args.mirror}/releases/{args.release}/Everything/x86_64/os/".format(args=args)
+ if not check_if_url_exists("%s/media.repo" % baseurl):
+ baseurl = "{args.mirror}/development/{args.release}/Everything/x86_64/os/".format(args=args)
+
+ release_url = "baseurl=%s" % baseurl
+ updates_url = "baseurl={args.mirror}/updates/{args.release}/x86_64/".format(args=args)
+ else:
+ release_url = ("metalink=https://mirrors.fedoraproject.org/metalink?" +
+ "repo=fedora-{args.release}&arch=x86_64".format(args=args))
+ updates_url = ("metalink=https://mirrors.fedoraproject.org/metalink?" +
+ "repo=updates-released-f{args.release}&arch=x86_64".format(args=args))
+
+ with open(os.path.join(workspace, "dnf.conf"), "w") as f:
+ f.write("""\
+[main]
+gpgcheck=1
+
+[fedora]
+name=Fedora {args.release} - base
+{release_url}
+gpgkey={gpg_key}
+
+[updates]
+name=Fedora {args.release} - updates
+{updates_url}
+gpgkey={gpg_key}
+""".format(args=args,
+ gpg_key=gpg_key,
+ release_url=release_url,
+ updates_url=updates_url))
+
+    invoke_dnf(args, workspace, run_build_script,
+               args.repositories if args.repositories else ["fedora", "updates"],
+               ["systemd", "fedora-release", "passwd"],
+               ["kernel", "systemd-udev", "binutils"])
+
+@complete_step('Installing Mageia')
+def install_mageia(args, workspace, run_build_script):
+
+ disable_kernel_install(args, workspace)
+
+    # Mageia does not (yet) publish an RPM GPG key on the web
+ gpg_key = '/etc/pki/rpm-gpg/RPM-GPG-KEY-Mageia'
+ if os.path.exists(gpg_key):
+ gpg_key = "file://%s" % gpg_key
+# else:
+# gpg_key = "https://getfedora.org/static/%s.txt" % FEDORA_KEYS_MAP[args.release]
+
+ if args.mirror:
+ baseurl = "{args.mirror}/distrib/{args.release}/x86_64/media/core/".format(args=args)
+ release_url = "baseurl=%s/release/" % baseurl
+ updates_url = "baseurl=%s/updates/" % baseurl
+ else:
+ baseurl = "https://www.mageia.org/mirrorlist/?release={args.release}&arch=x86_64&section=core".format(args=args)
+ release_url = "mirrorlist=%s&repo=release" % baseurl
+ updates_url = "mirrorlist=%s&repo=updates" % baseurl
+
+ with open(os.path.join(workspace, "dnf.conf"), "w") as f:
+ f.write("""\
+[main]
+gpgcheck=1
+
+[mageia]
+name=Mageia {args.release} Core Release
+{release_url}
+gpgkey={gpg_key}
+
+[updates]
+name=Mageia {args.release} Core Updates
+{updates_url}
+gpgkey={gpg_key}
+""".format(args=args,
+ gpg_key=gpg_key,
+ release_url=release_url,
+ updates_url=updates_url))
+
+    invoke_dnf(args, workspace, run_build_script,
+               args.repositories if args.repositories else ["mageia", "updates"],
+               ["basesystem-minimal"],
+               ["kernel-server-latest", "binutils"])
+
+def install_debian_or_ubuntu(args, workspace, run_build_script, mirror):
+ if args.repositories:
+ components = ','.join(args.repositories)
+ else:
+ components = 'main'
+ cmdline = ["debootstrap",
+ "--verbose",
+ "--merged-usr",
+ "--variant=minbase",
+ "--include=systemd-sysv",
+ "--exclude=sysv-rc,initscripts,startpar,lsb-base,insserv",
+ "--components=" + components,
+ args.release,
+ workspace + "/root",
+ mirror]
+ if args.bootable and args.output_format == OutputFormat.raw_btrfs:
+ cmdline[4] += ",btrfs-tools"
+
+ subprocess.run(cmdline, check=True)
+
+ # Debootstrap is not smart enough to deal correctly with alternative dependencies
+ # Installing libpam-systemd via debootstrap results in systemd-shim being installed
+ # Therefore, prefer to install via apt from inside the container
+    extra_packages = ['dbus', 'libpam-systemd']
+
+ # Also install extra packages via the secondary APT run, because it is smarter and
+ # can deal better with any conflicts
+ if args.packages is not None:
+ extra_packages += args.packages
+
+ if run_build_script and args.build_packages is not None:
+ extra_packages += args.build_packages
+
+ # Work around debian bug #835628
+ os.makedirs(os.path.join(workspace, "root/etc/dracut.conf.d"), exist_ok=True)
+ with open(os.path.join(workspace, "root/etc/dracut.conf.d/99-generic.conf"), "w") as f:
+ f.write("hostonly=no")
+
+ if args.bootable:
+ extra_packages += ["linux-image-amd64", "dracut"]
+
+ if extra_packages:
+        # Debian policy is to start daemons by default.
+        # The policy-rc.d script can be used to choose which ones to
+        # start. Let's install one that denies all daemon startups.
+        # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+        # Note: despite living in /usr/sbin, this file is not shipped by the OS
+        # and instead is meant to be managed by the admin.
+ policyrcd = os.path.join(workspace, "root/usr/sbin/policy-rc.d")
+ with open(policyrcd, "w") as f:
+ f.write("#!/bin/sh\n")
+ f.write("exit 101")
+ os.chmod(policyrcd, 0o755)
+ if not args.with_docs:
+        # Create dpkg.cfg to ignore documentation
+ dpkg_conf = os.path.join(workspace, "root/etc/dpkg/dpkg.cfg.d/01_nodoc")
+ with open(dpkg_conf, "w") as f:
+ f.writelines([
+ 'path-exclude /usr/share/locale/*\n',
+ 'path-exclude /usr/share/doc/*\n',
+ 'path-exclude /usr/share/man/*\n',
+ 'path-exclude /usr/share/groff/*\n',
+ 'path-exclude /usr/share/info/*\n',
+ 'path-exclude /usr/share/lintian/*\n',
+ 'path-exclude /usr/share/linda/*\n',
+ ])
+
+ cmdline = ["/usr/bin/apt-get", "--assume-yes", "--no-install-recommends", "install"] + extra_packages
+        run_workspace_command(args, workspace, *cmdline, network=True,
+                              env={'DEBIAN_FRONTEND': 'noninteractive', 'DEBCONF_NONINTERACTIVE_SEEN': 'true'})
+ os.unlink(policyrcd)
+
+@complete_step('Installing Debian')
+def install_debian(args, workspace, run_build_script):
+ install_debian_or_ubuntu(args, workspace, run_build_script, args.mirror)
+
+@complete_step('Installing Ubuntu')
+def install_ubuntu(args, workspace, run_build_script):
+ install_debian_or_ubuntu(args, workspace, run_build_script, args.mirror)
+
+@complete_step('Installing Arch Linux')
+def install_arch(args, workspace, run_build_script):
+ if args.release is not None:
+ sys.stderr.write("Distribution release specification is not supported for Arch Linux, ignoring.\n")
+
+ keyring = "archlinux"
+
+ if platform.machine() == "aarch64":
+ keyring += "arm"
+
+ subprocess.run(["pacman-key", "--nocolor", "--init"], check=True)
+ subprocess.run(["pacman-key", "--nocolor", "--populate", keyring], check=True)
+
+ if platform.machine() == "aarch64":
+ server = "Server = {}/$arch/$repo".format(args.mirror)
+ else:
+ server = "Server = {}/$repo/os/$arch".format(args.mirror)
+
+ with open(os.path.join(workspace, "pacman.conf"), "w") as f:
+ f.write("""\
+[options]
+LogFile = /dev/null
+HookDir = /no_hook/
+HoldPkg = pacman glibc
+Architecture = auto
+UseSyslog
+Color
+CheckSpace
+SigLevel = Required DatabaseOptional
+
+[core]
+{server}
+
+[extra]
+{server}
+
+[community]
+{server}
+""".format(args=args, server=server))
+
+ subprocess.run(["pacman", "--color", "never", "--config", os.path.join(workspace, "pacman.conf"), "-Sy"], check=True)
+ c = subprocess.run(["pacman", "--color", "never", "--config", os.path.join(workspace, "pacman.conf"), "-Sg", "base"], stdout=subprocess.PIPE, universal_newlines=True, check=True)
+ packages = set(c.stdout.split())
+ packages.remove("base")
+
+ packages -= {"cryptsetup",
+ "device-mapper",
+ "dhcpcd",
+ "e2fsprogs",
+ "jfsutils",
+ "lvm2",
+ "man-db",
+ "man-pages",
+ "mdadm",
+ "netctl",
+ "pcmciautils",
+ "reiserfsprogs",
+ "xfsprogs"}
+
+ if args.bootable:
+ if args.output_format == OutputFormat.raw_gpt:
+ packages.add("e2fsprogs")
+ elif args.output_format == OutputFormat.raw_btrfs:
+ packages.add("btrfs-progs")
+ else:
+ if "linux" in packages:
+ packages.remove("linux")
+
+ if args.packages is not None:
+ packages |= set(args.packages)
+
+ if run_build_script and args.build_packages is not None:
+ packages |= set(args.build_packages)
+
+ cmdline = ["pacstrap",
+ "-C", os.path.join(workspace, "pacman.conf"),
+ "-d",
+ workspace + "/root"] + \
+ list(packages)
+
+ subprocess.run(cmdline, check=True)
+
+ enable_networkd(workspace)
+
+@complete_step('Installing openSUSE')
+def install_opensuse(args, workspace, run_build_script):
+
+ root = os.path.join(workspace, "root")
+ release = args.release.strip('"')
+
+ #
+ # If the release looks like a timestamp, it's Tumbleweed.
+ # 13.x is legacy (14.x won't ever appear). For anything else,
+ # let's default to Leap.
+ #
+ if release.isdigit() or release == "tumbleweed":
+ release_url = "{}/tumbleweed/repo/oss/".format(args.mirror)
+ updates_url = "{}/update/tumbleweed/".format(args.mirror)
+ elif release.startswith("13."):
+ release_url = "{}/distribution/{}/repo/oss/".format(args.mirror, release)
+ updates_url = "{}/update/{}/".format(args.mirror, release)
+ else:
+ release_url = "{}/distribution/leap/{}/repo/oss/".format(args.mirror, release)
+ updates_url = "{}/update/leap/{}/oss/".format(args.mirror, release)
+
+ #
+    # Configure the repositories: we need to enable package caching
+ # here to make sure that the package cache stays populated after
+ # "zypper install".
+ #
+ subprocess.run(["zypper", "--root", root, "addrepo", "-ck", release_url, "Main"], check=True)
+ subprocess.run(["zypper", "--root", root, "addrepo", "-ck", updates_url, "Updates"], check=True)
+
+ if not args.with_docs:
+ with open(os.path.join(root, "etc/zypp/zypp.conf"), "w") as f:
+ f.write("rpm.install.excludedocs = yes\n")
+
+    # The common part of the install command.
+ cmdline = ["zypper", "--root", root, "--gpg-auto-import-keys",
+ "install", "-y", "--no-recommends"]
+ #
+ # Install the "minimal" package set.
+ #
+ subprocess.run(cmdline + ["-t", "pattern", "minimal_base"], check=True)
+
+ #
+ # Now install the additional packages if necessary.
+ #
+ extra_packages = []
+
+ if args.bootable:
+ extra_packages += ["kernel-default"]
+
+ if args.encrypt:
+ extra_packages += ["device-mapper"]
+
+ if args.output_format in (OutputFormat.subvolume, OutputFormat.raw_btrfs):
+ extra_packages += ["btrfsprogs"]
+
+ if args.packages:
+ extra_packages += args.packages
+
+ if run_build_script and args.build_packages is not None:
+ extra_packages += args.build_packages
+
+ if extra_packages:
+ subprocess.run(cmdline + extra_packages, check=True)
+
+ #
+    # Disable the package caching in the image that we enabled
+    # previously to populate the package cache.
+ #
+ subprocess.run(["zypper", "--root", root, "modifyrepo", "-K", "Main"], check=True)
+ subprocess.run(["zypper", "--root", root, "modifyrepo", "-K", "Updates"], check=True)
+
+ #
+    # Tune dracut configuration: openSUSE ships an old version of
+    # dracut, which probably explains why we need these hacks.
+ #
+ if args.bootable:
+ os.makedirs(os.path.join(root, "etc/dracut.conf.d"), exist_ok=True)
+
+ with open(os.path.join(root, "etc/dracut.conf.d/99-mkosi.conf"), "w") as f:
+ f.write("hostonly=no\n")
+
+ # dracut from openSUSE is missing upstream commit 016613c774baf.
+ with open(os.path.join(root, "etc/kernel/cmdline"), "w") as cmdline:
+ cmdline.write(args.kernel_commandline + " root=/dev/gpt-auto-root\n")
+
+def install_distribution(args, workspace, run_build_script, cached):
+
+ if cached:
+ return
+
+ install = {
+ Distribution.fedora : install_fedora,
+ Distribution.mageia : install_mageia,
+ Distribution.debian : install_debian,
+ Distribution.ubuntu : install_ubuntu,
+ Distribution.arch : install_arch,
+ Distribution.opensuse : install_opensuse,
+ }
+
+ install[args.distribution](args, workspace, run_build_script)
+ assign_hostname(args, workspace)
+
+def reset_machine_id(args, workspace, run_build_script, for_cache):
+ """Make /etc/machine-id an empty file.
+
+    This way, on the next boot it is either initialized and committed (if /etc
+    is writable), or the image runs with a transient machine ID that changes
+    on each boot (if the image is read-only).
+ """
+
+ if run_build_script:
+ return
+ if for_cache:
+ return
+
+ with complete_step('Resetting machine ID'):
+ machine_id = os.path.join(workspace, 'root', 'etc/machine-id')
+ os.unlink(machine_id)
+ open(machine_id, "w+b").close()
+ dbus_machine_id = os.path.join(workspace, 'root', 'var/lib/dbus/machine-id')
+ try:
+ os.unlink(dbus_machine_id)
+ except FileNotFoundError:
+ pass
+ else:
+ os.symlink('../../../etc/machine-id', dbus_machine_id)
+
+def set_root_password(args, workspace, run_build_script, for_cache):
+ "Set the root account password, or just delete it so it's easy to log in"
+
+ if run_build_script:
+ return
+ if for_cache:
+ return
+
+ if args.password == '':
+ print_step("Deleting root password...")
+ jj = lambda line: (':'.join(['root', ''] + line.split(':')[2:])
+ if line.startswith('root:') else line)
+ patch_file(os.path.join(workspace, 'root', 'etc/passwd'), jj)
+ elif args.password:
+ print_step("Setting root password...")
+ password = crypt.crypt(args.password, crypt.mksalt(crypt.METHOD_SHA512))
+ jj = lambda line: (':'.join(['root', password] + line.split(':')[2:])
+ if line.startswith('root:') else line)
+ patch_file(os.path.join(workspace, 'root', 'etc/shadow'), jj)
+
+def run_postinst_script(args, workspace, run_build_script, for_cache):
+
+ if args.postinst_script is None:
+ return
+ if for_cache:
+ return
+
+ with complete_step('Running post installation script'):
+
+ # We copy the postinst script into the build tree. We'd prefer
+ # mounting it into the tree, but for that we'd need a good
+ # place to mount it to. But if we create that we might as well
+ # just copy the file anyway.
+
+ shutil.copy2(args.postinst_script,
+ os.path.join(workspace, "root", "root/postinst"))
+
+ run_workspace_command(args, workspace, "/root/postinst", "build" if run_build_script else "final", network=args.with_network)
+ os.unlink(os.path.join(workspace, "root", "root/postinst"))
+
+def install_boot_loader_arch(args, workspace):
+ patch_file(os.path.join(workspace, "root", "etc/mkinitcpio.conf"),
+ lambda line: "HOOKS=\"systemd modconf block filesystems fsck\"\n" if line.startswith("HOOKS=") else line)
+
+ kernel_version = next(filter(lambda x: x[0].isdigit(), os.listdir(os.path.join(workspace, "root", "lib/modules"))))
+
+ run_workspace_command(args, workspace,
+ "/usr/bin/kernel-install", "add", kernel_version, "/boot/vmlinuz-linux")
+
+def install_boot_loader_debian(args, workspace):
+ kernel_version = next(filter(lambda x: x[0].isdigit(), os.listdir(os.path.join(workspace, "root", "lib/modules"))))
+
+ run_workspace_command(args, workspace,
+ "/usr/bin/kernel-install", "add", kernel_version, "/boot/vmlinuz-" + kernel_version)
+
+def install_boot_loader_opensuse(args, workspace):
+ install_boot_loader_debian(args, workspace)
+
+def install_boot_loader(args, workspace, cached):
+ if not args.bootable:
+ return
+
+ if cached:
+ return
+
+ with complete_step("Installing boot loader"):
+ shutil.copyfile(os.path.join(workspace, "root", "usr/lib/systemd/boot/efi/systemd-bootx64.efi"),
+ os.path.join(workspace, "root", "boot/efi/EFI/systemd/systemd-bootx64.efi"))
+
+ shutil.copyfile(os.path.join(workspace, "root", "usr/lib/systemd/boot/efi/systemd-bootx64.efi"),
+ os.path.join(workspace, "root", "boot/efi/EFI/BOOT/bootx64.efi"))
+
+ if args.distribution == Distribution.arch:
+ install_boot_loader_arch(args, workspace)
+
+ if args.distribution == Distribution.debian:
+ install_boot_loader_debian(args, workspace)
+
+ if args.distribution == Distribution.opensuse:
+ install_boot_loader_opensuse(args, workspace)
+
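+# Recursively copy the tree under source+suffix into dest+suffix,
+# re-creating directories with their original modes, replacing existing
+# files, and copying symlinks as symlinks.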
+def enumerate_and_copy(source, dest, suffix = ""):
+ for entry in os.scandir(source + suffix):
+ dest_path = dest + suffix + "/" + entry.name
+
+ if entry.is_dir():
+ os.makedirs(dest_path,
+ mode=entry.stat(follow_symlinks=False).st_mode & 0o7777,
+ exist_ok=True)
+ enumerate_and_copy(source, dest, suffix + "/" + entry.name)
+ else:
+            try:
+                os.unlink(dest_path)
+            except OSError:
+                pass
+
+ shutil.copy(entry.path, dest_path, follow_symlinks=False)
+
+ shutil.copystat(entry.path, dest_path, follow_symlinks=False)
+
+def install_extra_trees(args, workspace, for_cache):
+ if args.extra_trees is None:
+ return
+
+ if for_cache:
+ return
+
+ with complete_step('Copying in extra file trees'):
+ for d in args.extra_trees:
+ enumerate_and_copy(d, os.path.join(workspace, "root"))
+
+def copy_git_files(src, dest, *, git_files):
+ subprocess.run(['git', 'clone', '--depth=1', '--recursive', '--shallow-submodules', src, dest],
+ check=True)
+
+ what_files = ['--exclude-standard', '--modified']
+ if git_files == 'others':
+ what_files += ['--others', '--exclude=.mkosi-*']
+
+ # everything that's modified from the tree
+ c = subprocess.run(['git', '-C', src, 'ls-files', '-z'] + what_files,
+ stdout=subprocess.PIPE,
+ universal_newlines=False,
+ check=True)
+ files = {x.decode("utf-8") for x in c.stdout.rstrip(b'\0').split(b'\0')}
+
+ # everything that's modified and about to be committed
+ c = subprocess.run(['git', '-C', src, 'diff', '--cached', '--name-only', '-z'],
+ stdout=subprocess.PIPE,
+ universal_newlines=False,
+ check=True)
+ files |= {x.decode("utf-8") for x in c.stdout.rstrip(b'\0').split(b'\0')}
+ files.discard('')
+
+ del c
+
+ for path in files:
+ src_path = os.path.join(src, path)
+ dest_path = os.path.join(dest, path)
+
+ directory = os.path.dirname(dest_path)
+ os.makedirs(directory, exist_ok=True)
+
+ shutil.copy2(src_path, dest_path, follow_symlinks=False)
+
+def install_build_src(args, workspace, run_build_script, for_cache):
+ if not run_build_script:
+ return
+ if for_cache:
+ return
+
+ if args.build_script is None:
+ return
+
+ with complete_step('Copying in build script and sources'):
+ shutil.copy(args.build_script,
+ os.path.join(workspace, "root", "root", os.path.basename(args.build_script)))
+
+ if args.build_sources is not None:
+ target = os.path.join(workspace, "root", "root/src")
+ use_git = args.use_git_files
+ if use_git is None:
+ use_git = os.path.exists('.git') or os.path.exists(os.path.join(args.build_sources, '.git'))
+
+ if use_git:
+ copy_git_files(args.build_sources, target, git_files=args.git_files)
+ else:
+ ignore = shutil.ignore_patterns('.git', '.mkosi-*')
+ shutil.copytree(args.build_sources, target, symlinks=True, ignore=ignore)
+
+def install_build_dest(args, workspace, run_build_script, for_cache):
+ if run_build_script:
+ return
+ if for_cache:
+ return
+
+ if args.build_script is None:
+ return
+
+ with complete_step('Copying in build tree'):
+ enumerate_and_copy(os.path.join(workspace, "dest"), os.path.join(workspace, "root"))
+
+def make_read_only(args, workspace, for_cache):
+ if not args.read_only:
+ return
+ if for_cache:
+ return
+
+ if args.output_format not in (OutputFormat.raw_btrfs, OutputFormat.subvolume):
+ return
+
+ with complete_step('Marking root subvolume read-only'):
+ btrfs_subvol_make_ro(os.path.join(workspace, "root"))
+
+def make_tar(args, workspace, run_build_script, for_cache):
+
+ if run_build_script:
+ return None
+ if args.output_format != OutputFormat.tar:
+ return None
+ if for_cache:
+ return None
+
+ with complete_step('Creating archive'):
+ f = tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-")
+ subprocess.run(["tar", "-C", os.path.join(workspace, "root"),
+ "-c", "-J", "--xattrs", "--xattrs-include=*", "."],
+ stdout=f, check=True)
+
+ return f
+
+def make_squashfs(args, workspace, for_cache):
+ if args.output_format != OutputFormat.raw_squashfs:
+ return None
+ if for_cache:
+ return None
+
+ with complete_step('Creating squashfs file system'):
+ f = tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-squashfs")
+ subprocess.run(["mksquashfs", os.path.join(workspace, "root"), f.name, "-comp", "lz4", "-noappend"],
+ check=True)
+
+ return f
+
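+# Parse the output of "sfdisk --dump", which consists of a header block,
+# an empty line, and then one line per partition, e.g. (illustrative):
+#
+#     /dev/loop0p1 : start=2048, size=524288, type=c12a7328-..., name="ESP System Partition"
+#
+# The partition lines are kept verbatim so they can be replayed when a
+# new partition is appended, and the largest end sector is tracked so we
+# know where that new partition may start.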
+def read_partition_table(loopdev):
+
+ table = []
+ last_sector = 0
+
+ c = subprocess.run(["sfdisk", "--dump", loopdev], stdout=subprocess.PIPE, check=True)
+
+ in_body = False
+ for line in c.stdout.decode("utf-8").split('\n'):
+ stripped = line.strip()
+
+ if stripped == "": # empty line is where the body begins
+ in_body = True
+ continue
+ if not in_body:
+ continue
+
+ table.append(stripped)
+
+ name, rest = stripped.split(":", 1)
+ fields = rest.split(",")
+
+ start = None
+ size = None
+
+ for field in fields:
+ f = field.strip()
+
+ if f.startswith("start="):
+ start = int(f[6:])
+ if f.startswith("size="):
+ size = int(f[5:])
+
+ if start is not None and size is not None:
+ end = start + size
+ if end > last_sector:
+ last_sector = end
+
+ return table, last_sector * 512
+
+def insert_partition(args, workspace, raw, loopdev, partno, blob, name, type_uuid, part_uuid=None):
+
+ if args.ran_sfdisk:
+ old_table, last_partition_sector = read_partition_table(loopdev)
+ else:
+ # No partition table yet? Then let's fake one...
+ old_table = []
+ last_partition_sector = GPT_HEADER_SIZE
+
+ blob_size = roundup512(os.stat(blob.name).st_size)
+ luks_extra = 2*1024*1024 if args.encrypt == "all" else 0
+ new_size = last_partition_sector + blob_size + luks_extra + GPT_FOOTER_SIZE
+
+ print_step("Resizing disk image to {}...".format(format_bytes(new_size)))
+
+ os.truncate(raw.name, new_size)
+ subprocess.run(["losetup", "--set-capacity", loopdev], check=True)
+
+ print_step("Inserting partition of {}...".format(format_bytes(blob_size)))
+
+ table = "label: gpt\n"
+
+ for t in old_table:
+ table += t + "\n"
+
+    if part_uuid is not None:
+        table += "uuid=" + str(part_uuid) + ", "
+
+ table += 'size={}, type={}, attrs=GUID:60, name="{}"\n'.format((blob_size + luks_extra) // 512, type_uuid, name)
+
+ print(table)
+
+ subprocess.run(["sfdisk", "--color=never", loopdev], input=table.encode("utf-8"), check=True)
+ subprocess.run(["sync"])
+
+ print_step("Writing partition...")
+
+ if args.root_partno == partno:
+ luks_format_root(args, loopdev, False, True)
+ dev = luks_setup_root(args, loopdev, False, True)
+ else:
+ dev = None
+
+ try:
+ subprocess.run(["dd", "if=" + blob.name, "of=" + (dev if dev is not None else partition(loopdev, partno))], check=True)
+ finally:
+ luks_close(dev, "Closing LUKS root partition")
+
+ args.ran_sfdisk = True
+
+ return blob_size
+
+def insert_squashfs(args, workspace, raw, loopdev, squashfs, for_cache):
+ if args.output_format != OutputFormat.raw_squashfs:
+ return
+ if for_cache:
+ return
+
+ with complete_step('Inserting squashfs root partition'):
+ args.root_size = insert_partition(args, workspace, raw, loopdev, args.root_partno, squashfs,
+ "Root Partition", GPT_ROOT_NATIVE)
+
+def make_verity(args, workspace, dev, run_build_script, for_cache):
+
+ if run_build_script or not args.verity:
+ return None, None
+ if for_cache:
+ return None, None
+
+ with complete_step('Generating verity hashes'):
+ f = tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-")
+ c = subprocess.run(["veritysetup", "format", dev, f.name],
+ stdout=subprocess.PIPE, check=True)
+
+ for line in c.stdout.decode("utf-8").split('\n'):
+ if line.startswith("Root hash:"):
+ root_hash = line[10:].strip()
+ return f, root_hash
+
+ raise ValueError('Root hash not found')
+
+def insert_verity(args, workspace, raw, loopdev, verity, root_hash, for_cache):
+
+ if verity is None:
+ return
+ if for_cache:
+ return
+
+    # Use the final 128 bits of the root hash as partition UUID of the verity partition
+ u = uuid.UUID(root_hash[-32:])
+
+ with complete_step('Inserting verity partition'):
+ insert_partition(args, workspace, raw, loopdev, args.verity_partno, verity,
+ "Verity Partition", GPT_ROOT_NATIVE_VERITY, u)
+
+def patch_root_uuid(args, loopdev, root_hash, for_cache):
+
+ if root_hash is None:
+ return
+ if for_cache:
+ return
+
+    # Use the first 128 bits of the root hash as partition UUID of the root partition
+ u = uuid.UUID(root_hash[:32])
+
+ with complete_step('Patching root partition UUID'):
+ subprocess.run(["sfdisk", "--part-uuid", loopdev, str(args.root_partno), str(u)],
+ check=True)
+
+def install_unified_kernel(args, workspace, run_build_script, for_cache, root_hash):
+
+ # Iterates through all kernel versions included in the image and
+ # generates a combined kernel+initrd+cmdline+osrelease EFI file
+ # from it and places it in the /EFI/Linux directory of the
+ # ESP. sd-boot iterates through them and shows them in the
+ # menu. These "unified" single-file images have the benefit that
+ # they can be signed like normal EFI binaries, and can encode
+ # everything necessary to boot a specific root device, including
+ # the root hash.
+
+ if not args.bootable:
+ return
+ if for_cache:
+ return
+
+ if args.distribution not in (Distribution.fedora, Distribution.mageia):
+ return
+
+ with complete_step("Generating combined kernel + initrd boot file"):
+
+ cmdline = args.kernel_commandline
+ if root_hash is not None:
+ cmdline += " roothash=" + root_hash
+
+ for kver in os.scandir(os.path.join(workspace, "root", "usr/lib/modules")):
+ if not kver.is_dir():
+ continue
+
+ boot_binary = "/efi/EFI/Linux/linux-" + kver.name
+ if root_hash is not None:
+ boot_binary += "-" + root_hash
+ boot_binary += ".efi"
+
+ dracut = ["/usr/bin/dracut",
+ "-v",
+ "--no-hostonly",
+ "--uefi",
+ "--kver", kver.name,
+ "--kernel-cmdline", cmdline ]
+
+ # Temporary fix until dracut includes these in the image anyway
+ dracut += ("-i",) + ("/usr/lib/systemd/system/systemd-volatile-root.service",)*2 + \
+ ("-i",) + ("/usr/lib/systemd/systemd-volatile-root",)*2 + \
+ ("-i",) + ("/usr/lib/systemd/systemd-veritysetup",)*2 + \
+ ("-i",) + ("/usr/lib/systemd/system-generators/systemd-veritysetup-generator",)*2
+
+ if args.output_format == OutputFormat.raw_squashfs:
+ dracut += [ '--add-drivers', 'squashfs' ]
+
+ dracut += [ boot_binary ]
+
+            run_workspace_command(args, workspace, *dracut)
+
+def secure_boot_sign(args, workspace, run_build_script, for_cache):
+
+ if run_build_script:
+ return
+ if not args.bootable:
+ return
+ if not args.secure_boot:
+ return
+ if for_cache:
+ return
+
+ for path, dirnames, filenames in os.walk(os.path.join(workspace, "root", "efi")):
+ for i in filenames:
+ if not i.endswith(".efi") and not i.endswith(".EFI"):
+ continue
+
+ with complete_step("Signing EFI binary {} in ESP".format(i)):
+ p = os.path.join(path, i)
+
+ subprocess.run(["sbsign",
+ "--key", args.secure_boot_key,
+ "--cert", args.secure_boot_certificate,
+ "--output", p + ".signed",
+ p], check=True)
+
+ os.rename(p + ".signed", p)
+
+def xz_output(args, raw):
+ if args.output_format not in (OutputFormat.raw_btrfs, OutputFormat.raw_gpt, OutputFormat.raw_squashfs):
+ return raw
+
+ if not args.xz:
+ return raw
+
+ with complete_step('Compressing image file'):
+ f = tempfile.NamedTemporaryFile(prefix=".mkosi-", dir=os.path.dirname(args.output))
+ subprocess.run(["xz", "-c", raw.name], stdout=f, check=True)
+
+ return f
+
+def write_root_hash_file(args, root_hash):
+ if root_hash is None:
+ return None
+
+ with complete_step('Writing .roothash file'):
+ f = tempfile.NamedTemporaryFile(mode='w+b', prefix='.mkosi',
+ dir=os.path.dirname(args.output_root_hash_file))
+ f.write((root_hash + "\n").encode())
+
+ return f
+
+def copy_nspawn_settings(args):
+ if args.nspawn_settings is None:
+ return None
+
+ with complete_step('Copying nspawn settings file'):
+ f = tempfile.NamedTemporaryFile(mode="w+b", prefix=".mkosi-",
+ dir=os.path.dirname(args.output_nspawn_settings))
+
+ with open(args.nspawn_settings, "rb") as c:
+ f.write(c.read())
+
+ return f
+
+def hash_file(of, sf, fname):
+ bs = 16*1024**2
+ h = hashlib.sha256()
+
+ sf.seek(0)
+ buf = sf.read(bs)
+ while len(buf) > 0:
+ h.update(buf)
+ buf = sf.read(bs)
+
+ of.write(h.hexdigest() + " *" + fname + "\n")
+
+def calculate_sha256sum(args, raw, tar, root_hash_file, nspawn_settings):
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume):
+ return None
+
+ if not args.checksum:
+ return None
+
+ with complete_step('Calculating SHA256SUMS'):
+ f = tempfile.NamedTemporaryFile(mode="w+", prefix=".mkosi-", encoding="utf-8",
+ dir=os.path.dirname(args.output_checksum))
+
+ if raw is not None:
+ hash_file(f, raw, os.path.basename(args.output))
+ if tar is not None:
+ hash_file(f, tar, os.path.basename(args.output))
+ if root_hash_file is not None:
+ hash_file(f, root_hash_file, os.path.basename(args.output_root_hash_file))
+ if nspawn_settings is not None:
+ hash_file(f, nspawn_settings, os.path.basename(args.output_nspawn_settings))
+
+ return f
+
+def calculate_signature(args, checksum):
+ if not args.sign:
+ return None
+
+ if checksum is None:
+ return None
+
+ with complete_step('Signing SHA256SUMS'):
+ f = tempfile.NamedTemporaryFile(mode="wb", prefix=".mkosi-",
+ dir=os.path.dirname(args.output_signature))
+
+ cmdline = ["gpg", "--detach-sign"]
+
+ if args.key is not None:
+ cmdline += ["--default-key", args.key]
+
+ checksum.seek(0)
+ subprocess.run(cmdline, stdin=checksum, stdout=f, check=True)
+
+ return f
+
+def save_cache(args, workspace, raw, cache_path):
+
+ if cache_path is None:
+ return
+
+ with complete_step('Installing cache copy ',
+ 'Successfully installed cache copy ' + cache_path):
+
+ if args.output_format in (OutputFormat.raw_btrfs, OutputFormat.raw_gpt):
+ os.chmod(raw, 0o666 & ~args.original_umask)
+ shutil.move(raw, cache_path)
+ else:
+ shutil.move(os.path.join(workspace, "root"), cache_path)
+
+def link_output(args, workspace, raw, tar):
+ with complete_step('Linking image file',
+ 'Successfully linked ' + args.output):
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume):
+ os.rename(os.path.join(workspace, "root"), args.output)
+ elif args.output_format in (OutputFormat.raw_btrfs, OutputFormat.raw_gpt, OutputFormat.raw_squashfs):
+ os.chmod(raw, 0o666 & ~args.original_umask)
+ os.link(raw, args.output)
+ else:
+ os.chmod(tar, 0o666 & ~args.original_umask)
+ os.link(tar, args.output)
+
+def link_output_nspawn_settings(args, path):
+ if path is None:
+ return
+
+ with complete_step('Linking nspawn settings file',
+ 'Successfully linked ' + args.output_nspawn_settings):
+ os.chmod(path, 0o666 & ~args.original_umask)
+ os.link(path, args.output_nspawn_settings)
+
+def link_output_checksum(args, checksum):
+ if checksum is None:
+ return
+
+ with complete_step('Linking SHA256SUMS file',
+ 'Successfully linked ' + args.output_checksum):
+ os.chmod(checksum, 0o666 & ~args.original_umask)
+ os.link(checksum, args.output_checksum)
+
+def link_output_root_hash_file(args, root_hash_file):
+ if root_hash_file is None:
+ return
+
+ with complete_step('Linking .roothash file',
+ 'Successfully linked ' + args.output_root_hash_file):
+ os.chmod(root_hash_file, 0o666 & ~args.original_umask)
+ os.link(root_hash_file, args.output_root_hash_file)
+
+def link_output_signature(args, signature):
+ if signature is None:
+ return
+
+ with complete_step('Linking SHA256SUMS.gpg file',
+ 'Successfully linked ' + args.output_signature):
+ os.chmod(signature, 0o666 & ~args.original_umask)
+ os.link(signature, args.output_signature)
+
+def dir_size(path):
+    size = 0
+    for entry in os.scandir(path):
+        if entry.is_symlink():
+            # We can ignore symlinks because they either point into our tree,
+            # in which case we'll include the size of target directory anyway,
+            # or outside, in which case we don't need to.
+            continue
+        elif entry.is_file():
+            size += entry.stat().st_blocks * 512
+        elif entry.is_dir():
+            size += dir_size(entry.path)
+    return size
+
+def print_output_size(args):
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume):
+ print_step("Resulting image size is " + format_bytes(dir_size(args.output)) + ".")
+ else:
+ st = os.stat(args.output)
+ print_step("Resulting image size is " + format_bytes(st.st_size) + ", consumes " + format_bytes(st.st_blocks * 512) + ".")
+
+def setup_cache(args):
+ with complete_step('Setting up package cache',
+ 'Setting up package cache {} complete') as output:
+ if args.cache_path is None:
+ d = tempfile.TemporaryDirectory(dir=os.path.dirname(args.output), prefix=".mkosi-")
+ args.cache_path = d.name
+ else:
+ os.makedirs(args.cache_path, 0o755, exist_ok=True)
+ d = None
+ output.append(args.cache_path)
+
+ return d
+
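+# argparse action that splits comma-separated values and accumulates them
+# across repeated uses of an option, so "--package=a,b --package=c"
+# yields ['a', 'b', 'c'].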
+class PackageAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ l = getattr(namespace, self.dest)
+ if l is None:
+ l = []
+ l.extend(values.split(","))
+ setattr(namespace, self.dest, l)
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Build Legacy-Free OS Images', add_help=False)
+
+ group = parser.add_argument_group("Commands")
+ group.add_argument("verb", choices=("build", "clean", "help", "summary"), nargs='?', default="build", help='Operation to execute')
+ group.add_argument('-h', '--help', action='help', help="Show this help")
+ group.add_argument('--version', action='version', version='%(prog)s ' + __version__)
+
+ group = parser.add_argument_group("Distribution")
+ group.add_argument('-d', "--distribution", choices=Distribution.__members__, help='Distribution to install')
+ group.add_argument('-r', "--release", help='Distribution release to install')
+ group.add_argument('-m', "--mirror", help='Distribution mirror to use')
+ group.add_argument("--repositories", action=PackageAction, dest='repositories', help='Repositories to use', metavar='REPOS')
+
+ group = parser.add_argument_group("Output")
+ group.add_argument('-t', "--format", dest='output_format', choices=OutputFormat.__members__, help='Output Format')
+ group.add_argument('-o', "--output", help='Output image path', metavar='PATH')
+ group.add_argument('-f', "--force", action='count', dest='force_count', default=0, help='Remove existing image file before operation')
+ group.add_argument('-b', "--bootable", type=parse_boolean, nargs='?', const=True,
+ help='Make image bootable on EFI (only raw_gpt, raw_btrfs, raw_squashfs)')
+ group.add_argument("--secure-boot", action='store_true', help='Sign the resulting kernel/initrd image for UEFI SecureBoot')
+ group.add_argument("--secure-boot-key", help="UEFI SecureBoot private key in PEM format", metavar='PATH')
+ group.add_argument("--secure-boot-certificate", help="UEFI SecureBoot certificate in X509 format", metavar='PATH')
+ group.add_argument("--read-only", action='store_true', help='Make root volume read-only (only raw_gpt, raw_btrfs, subvolume, implied on raw_squashs)')
+ group.add_argument("--encrypt", choices=("all", "data"), help='Encrypt everything except: ESP ("all") or ESP and root ("data")')
+ group.add_argument("--verity", action='store_true', help='Add integrity partition (implies --read-only)')
+ group.add_argument("--compress", action='store_true', help='Enable compression in file system (only raw_btrfs, subvolume)')
+ group.add_argument("--xz", action='store_true', help='Compress resulting image with xz (only raw_gpt, raw_btrfs, raw_squashfs, implied on tar)')
+ group.add_argument('-i', "--incremental", action='store_true', help='Make use of and generate intermediary cache images')
+
+ group = parser.add_argument_group("Packages")
+ group.add_argument('-p', "--package", action=PackageAction, dest='packages', help='Add an additional package to the OS image', metavar='PACKAGE')
+ group.add_argument("--with-docs", action='store_true', help='Install documentation (only Fedora and Mageia)')
+ group.add_argument("--cache", dest='cache_path', help='Package cache path', metavar='PATH')
+ group.add_argument("--extra-tree", action='append', dest='extra_trees', help='Copy an extra tree on top of image', metavar='PATH')
+ group.add_argument("--build-script", help='Build script to run inside image', metavar='PATH')
+ group.add_argument("--build-sources", help='Path for sources to build', metavar='PATH')
+ group.add_argument("--build-dir", help='Path to use as persistent build directory', metavar='PATH')
+ group.add_argument("--build-package", action=PackageAction, dest='build_packages', help='Additional packages needed for build script', metavar='PACKAGE')
+ group.add_argument("--postinst-script", help='Post installation script to run inside image', metavar='PATH')
+ group.add_argument('--use-git-files', type=parse_boolean,
+ help='Ignore any files that git itself ignores (default: guess)')
+ group.add_argument('--git-files', choices=('cached', 'others'),
+ help='Whether to include untracked files (default: others)')
+ group.add_argument("--with-network", action='store_true', help='Run build and postinst scripts with network access (instead of private network)')
+ group.add_argument("--settings", dest='nspawn_settings', help='Add in .spawn settings file', metavar='PATH')
+
+ group = parser.add_argument_group("Partitions")
+ group.add_argument("--root-size", help='Set size of root partition (only raw_gpt, raw_btrfs)', metavar='BYTES')
+ group.add_argument("--esp-size", help='Set size of EFI system partition (only raw_gpt, raw_btrfs, raw_squashfs)', metavar='BYTES')
+ group.add_argument("--swap-size", help='Set size of swap partition (only raw_gpt, raw_btrfs, raw_squashfs)', metavar='BYTES')
+ group.add_argument("--home-size", help='Set size of /home partition (only raw_gpt, raw_squashfs)', metavar='BYTES')
+ group.add_argument("--srv-size", help='Set size of /srv partition (only raw_gpt, raw_squashfs)', metavar='BYTES')
+
+ group = parser.add_argument_group("Validation (only raw_gpt, raw_btrfs, raw_squashfs, tar)")
+ group.add_argument("--checksum", action='store_true', help='Write SHA256SUMS file')
+ group.add_argument("--sign", action='store_true', help='Write and sign SHA256SUMS file')
+ group.add_argument("--key", help='GPG key to use for signing')
+ group.add_argument("--password", help='Set the root password')
+
+ group = parser.add_argument_group("Additional Configuration")
+ group.add_argument('-C', "--directory", help='Change to specified directory before doing anything', metavar='PATH')
+ group.add_argument("--default", dest='default_path', help='Read configuration data from file', metavar='PATH')
+ group.add_argument("--kernel-commandline", help='Set the kernel command line (only bootable images)')
+ group.add_argument("--hostname", help="Set hostname")
+
+ try:
+ argcomplete.autocomplete(parser)
+ except NameError:
+ pass
+
+ args = parser.parse_args()
+
+ if args.verb == "help":
+ parser.print_help()
+ sys.exit(0)
+
+ return args
+
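+# Parse a human-readable size with an optional K/M/G binary suffix into a
+# byte count, e.g. parse_bytes("512M") == 512*1024**2. The result must be a
+# positive multiple of 512, since sizes end up in 512-byte sector units.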
+def parse_bytes(size):
+    if size is None:
+        return size
+
+    if size.endswith('G'):
+        factor = 1024**3
+    elif size.endswith('M'):
+        factor = 1024**2
+    elif size.endswith('K'):
+        factor = 1024
+    else:
+        factor = 1
+
+    if factor > 1:
+        size = size[:-1]
+
+    result = int(size) * factor
+    if result <= 0:
+        raise ValueError("Size out of range")
+
+    if result % 512 != 0:
+        raise ValueError("Size not a multiple of 512")
+
+    return result
+
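+# Guess the host distribution from the ID= and VERSION_ID= fields of
+# os-release(5); e.g. ID=fedora, VERSION_ID=25 maps to
+# (Distribution.fedora, "25").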
+def detect_distribution():
+    try:
+        f = open("/etc/os-release")
+    except IOError:
+        try:
+            f = open("/usr/lib/os-release")
+        except IOError:
+            return None, None
+
+    dist_id = None
+    version_id = None
+
+    with f:
+        for ln in f:
+            if ln.startswith("ID="):
+                dist_id = ln[3:].strip()
+            if ln.startswith("VERSION_ID="):
+                version_id = ln[11:].strip()
+
+    d = Distribution.__members__.get(dist_id, None)
+    return d, version_id
+
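+# Best-effort removal: the path may be a regular file, a btrfs subvolume or
+# a plain directory tree, so try all three removal methods in turn, ignoring
+# failures.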
+def unlink_try_hard(path):
+    try:
+        os.unlink(path)
+    except Exception:
+        pass
+
+    try:
+        btrfs_subvol_delete(path)
+    except Exception:
+        pass
+
+    try:
+        shutil.rmtree(path)
+    except Exception:
+        pass
+
+def empty_directory(path):
+
+ for f in os.listdir(path):
+ unlink_try_hard(os.path.join(path, f))
+
+def unlink_output(args):
+ if not args.force and args.verb != "clean":
+ return
+
+ with complete_step('Removing output files'):
+ unlink_try_hard(args.output)
+
+ if args.checksum:
+ unlink_try_hard(args.output_checksum)
+
+ if args.verity:
+ unlink_try_hard(args.output_root_hash_file)
+
+ if args.sign:
+ unlink_try_hard(args.output_signature)
+
+ if args.nspawn_settings is not None:
+ unlink_try_hard(args.output_nspawn_settings)
+
+    # Remove the cache if --force was passed at least twice for "build", or
+    # at least once together with the "clean" verb
+ if args.verb == "clean":
+ remove_cache = args.force_count > 0
+ else:
+ remove_cache = args.force_count > 1
+
+ if remove_cache:
+
+ if args.cache_pre_dev is not None or args.cache_pre_inst is not None:
+ with complete_step('Removing incremental cache files'):
+ if args.cache_pre_dev is not None:
+ unlink_try_hard(args.cache_pre_dev)
+
+ if args.cache_pre_inst is not None:
+ unlink_try_hard(args.cache_pre_inst)
+
+ if args.build_dir is not None:
+ with complete_step('Clearing out build directory'):
+ empty_directory(args.build_dir)
+
+def parse_boolean(s):
+ "Parse 1/true/yes as true and 0/false/no as false"
+ if s in {"1", "true", "yes"}:
+ return True
+
+ if s in {"0", "false", "no"}:
+ return False
+
+ raise ValueError("Invalid literal for bool(): {!r}".format(s))
+
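+# Apply a single key=value setting from a defaults file to args, never
+# overriding anything already set on the command line. Returns False for
+# unknown sections or keys; a key of None merely checks whether the section
+# itself is known.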
+def process_setting(args, section, key, value):
+ if section == "Distribution":
+ if key == "Distribution":
+ if args.distribution is None:
+ args.distribution = value
+ elif key == "Release":
+ if args.release is None:
+ args.release = value
+ elif key == "Repositories":
+            list_value = value if isinstance(value, list) else value.split()
+ if args.repositories is None:
+ args.repositories = list_value
+ else:
+ args.repositories.extend(list_value)
+ elif key == "Mirror":
+ if args.mirror is None:
+ args.mirror = value
+ elif key is None:
+ return True
+ else:
+ return False
+ elif section == "Output":
+ if key == "Format":
+ if args.output_format is None:
+ args.output_format = value
+ elif key == "Output":
+ if args.output is None:
+ args.output = value
+ elif key == "Force":
+ if not args.force:
+ args.force = parse_boolean(value)
+ elif key == "Bootable":
+ if args.bootable is None:
+ args.bootable = parse_boolean(value)
+ elif key == "KernelCommandLine":
+ if args.kernel_commandline is None:
+ args.kernel_commandline = value
+ elif key == "SecureBoot":
+ if not args.secure_boot:
+ args.secure_boot = parse_boolean(value)
+ elif key == "SecureBootKey":
+ if args.secure_boot_key is None:
+ args.secure_boot_key = value
+ elif key == "SecureBootCertificate":
+ if args.secure_boot_certificate is None:
+ args.secure_boot_certificate = value
+ elif key == "ReadOnly":
+ if not args.read_only:
+ args.read_only = parse_boolean(value)
+ elif key == "Encrypt":
+ if args.encrypt is None:
+ if value not in ("all", "data"):
+ raise ValueError("Invalid encryption setting: "+ value)
+ args.encrypt = value
+ elif key == "Verity":
+ if not args.verity:
+ args.verity = parse_boolean(value)
+ elif key == "Compress":
+ if not args.compress:
+ args.compress = parse_boolean(value)
+ elif key == "XZ":
+ if not args.xz:
+ args.xz = parse_boolean(value)
+ elif key == "Hostname":
+ if not args.hostname:
+ args.hostname = value
+ elif key is None:
+ return True
+ else:
+ return False
+ elif section == "Packages":
+ if key == "Packages":
+            list_value = value if isinstance(value, list) else value.split()
+ if args.packages is None:
+ args.packages = list_value
+ else:
+ args.packages.extend(list_value)
+ elif key == "WithDocs":
+ if not args.with_docs:
+ args.with_docs = parse_boolean(value)
+ elif key == "Cache":
+ if args.cache_path is None:
+ args.cache_path = value
+ elif key == "ExtraTrees":
+            list_value = value if isinstance(value, list) else value.split()
+ if args.extra_trees is None:
+ args.extra_trees = list_value
+ else:
+ args.extra_trees.extend(list_value)
+ elif key == "BuildScript":
+ if args.build_script is None:
+ args.build_script = value
+ elif key == "BuildSources":
+ if args.build_sources is None:
+ args.build_sources = value
+ elif key == "BuildDirectory":
+ if args.build_dir is None:
+ args.build_dir = value
+ elif key == "BuildPackages":
+            list_value = value if isinstance(value, list) else value.split()
+ if args.build_packages is None:
+ args.build_packages = list_value
+ else:
+ args.build_packages.extend(list_value)
+ elif key == "PostInstallationScript":
+ if args.postinst_script is None:
+ args.postinst_script = value
+ elif key == "WithNetwork":
+ if not args.with_network:
+ args.with_network = parse_boolean(value)
+ elif key == "NSpawnSettings":
+ if args.nspawn_settings is None:
+ args.nspawn_settings = value
+ elif key is None:
+ return True
+ else:
+ return False
+ elif section == "Partitions":
+ if key == "RootSize":
+ if args.root_size is None:
+ args.root_size = value
+ elif key == "ESPSize":
+ if args.esp_size is None:
+ args.esp_size = value
+ elif key == "SwapSize":
+ if args.swap_size is None:
+ args.swap_size = value
+ elif key == "HomeSize":
+ if args.home_size is None:
+ args.home_size = value
+ elif key == "SrvSize":
+ if args.srv_size is None:
+ args.srv_size = value
+ elif key is None:
+ return True
+ else:
+ return False
+ elif section == "Validation":
+ if key == "CheckSum":
+ if not args.checksum:
+ args.checksum = parse_boolean(value)
+ elif key == "Sign":
+ if not args.sign:
+ args.sign = parse_boolean(value)
+ elif key == "Key":
+ if args.key is None:
+ args.key = value
+ elif key == "Password":
+ if args.password is None:
+ args.password = value
+ elif key is None:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ return True
+
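+# Read one mkosi.default-style INI file into the options dict. List-valued
+# package settings are extended rather than replaced, so drop-in files can
+# add to what earlier files configured.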
+def load_defaults_file(fname, options):
+ try:
+ f = open(fname, "r")
+ except FileNotFoundError:
+ return
+
+ config = configparser.ConfigParser(delimiters='=')
+ config.optionxform = str
+ config.read_file(f)
+
+ # this is used only for validation
+ args = parse_args()
+
+ for section in config.sections():
+ if not process_setting(args, section, None, None):
+ sys.stderr.write("Unknown section in {}, ignoring: [{}]\n".format(fname, section))
+ continue
+ if section not in options:
+ options[section] = {}
+ for key in config[section]:
+ if not process_setting(args, section, key, config[section][key]):
+ sys.stderr.write("Unknown key in section [{}] in {}, ignoring: {}=\n".format(section, fname, key))
+ continue
+ if section == "Packages" and key in ["Packages", "ExtraTrees", "BuildPackages"]:
+ if key in options[section]:
+ options[section][key].extend(config[section][key].split())
+ else:
+ options[section][key] = config[section][key].split()
+ else:
+ options[section][key] = config[section][key]
+ return options
+
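+# Load mkosi.default (or the --default path) plus any drop-ins from the
+# matching .d/ directory in sorted order, then apply the merged settings to
+# args via process_setting().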
+def load_defaults(args):
+ fname = "mkosi.default" if args.default_path is None else args.default_path
+
+ config = {}
+ load_defaults_file(fname, config)
+
+ defaults_dir = fname + '.d'
+ if os.path.isdir(defaults_dir):
+ for defaults_file in sorted(os.listdir(defaults_dir)):
+ defaults_path = os.path.join(defaults_dir, defaults_file)
+ if os.path.isfile(defaults_path):
+ load_defaults_file(defaults_path, config)
+
+ for section in config.keys():
+ for key in config[section]:
+ process_setting(args, section, key, config[section][key])
+
+def find_nspawn_settings(args):
+ if args.nspawn_settings is not None:
+ return
+
+ if os.path.exists("mkosi.nspawn"):
+ args.nspawn_settings = "mkosi.nspawn"
+
+def find_extra(args):
+ if os.path.exists("mkosi.extra"):
+ if args.extra_trees is None:
+ args.extra_trees = ["mkosi.extra"]
+ else:
+ args.extra_trees.append("mkosi.extra")
+
+def find_cache(args):
+
+ if args.cache_path is not None:
+ return
+
+ if os.path.exists("mkosi.cache/"):
+ args.cache_path = "mkosi.cache/" + args.distribution.name + "~" + args.release
+
+def find_build_script(args):
+ if args.build_script is not None:
+ return
+
+ if os.path.exists("mkosi.build"):
+ args.build_script = "mkosi.build"
+
+def find_build_sources(args):
+ if args.build_sources is not None:
+ return
+
+ args.build_sources = os.getcwd()
+
+def find_build_dir(args):
+ if args.build_dir is not None:
+ return
+
+ if os.path.exists("mkosi.builddir/"):
+ args.build_dir = "mkosi.builddir"
+
+def find_postinst_script(args):
+ if args.postinst_script is not None:
+ return
+
+ if os.path.exists("mkosi.postinst"):
+ args.postinst_script = "mkosi.postinst"
+
+def find_passphrase(args):
+
+ if args.encrypt is None:
+ args.passphrase = None
+ return
+
+ try:
+        passphrase_mode = os.stat('mkosi.passphrase').st_mode & (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+        if (passphrase_mode & stat.S_IRWXU) > 0o600 or (passphrase_mode & (stat.S_IRWXG | stat.S_IRWXO)) > 0:
+            die("Permissions of 'mkosi.passphrase' ({}) are too open. When creating passphrase files please make sure to choose an access mode that restricts access to the owner only. Aborting.".format(oct(passphrase_mode)))
+
+ args.passphrase = { 'type': 'file', 'content': 'mkosi.passphrase' }
+
+ except FileNotFoundError:
+ while True:
+ passphrase = getpass.getpass("Please enter passphrase: ")
+ passphrase_confirmation = getpass.getpass("Passphrase confirmation: ")
+ if passphrase == passphrase_confirmation:
+ args.passphrase = { 'type': 'stdin', 'content': passphrase }
+ break
+
+ sys.stderr.write("Passphrase doesn't match confirmation. Please try again.\n")
+
+def find_secure_boot(args):
+ if not args.secure_boot:
+ return
+
+ if args.secure_boot_key is None:
+ if os.path.exists("mkosi.secure-boot.key"):
+ args.secure_boot_key = "mkosi.secure-boot.key"
+
+ if args.secure_boot_certificate is None:
+ if os.path.exists("mkosi.secure-boot.crt"):
+ args.secure_boot_certificate = "mkosi.secure-boot.crt"
+
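+# Strip any trailing combination of .xz/.raw/.tar from an output path so
+# that auxiliary file names can be derived from it, e.g.
+# strip_suffixes("image.raw.xz") == "image".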
+def strip_suffixes(path):
+ t = path
+ while True:
+ if t.endswith(".xz"):
+ t = t[:-3]
+ elif t.endswith(".raw"):
+ t = t[:-4]
+ elif t.endswith(".tar"):
+ t = t[:-4]
+ else:
+ break
+
+ return t
+
+def build_nspawn_settings_path(path):
+ return strip_suffixes(path) + ".nspawn"
+
+def build_root_hash_file_path(path):
+ return strip_suffixes(path) + ".roothash"
+
+def load_args():
+ args = parse_args()
+
+ if args.directory is not None:
+ os.chdir(args.directory)
+
+ load_defaults(args)
+ find_nspawn_settings(args)
+ find_extra(args)
+ find_build_script(args)
+ find_build_sources(args)
+ find_build_dir(args)
+ find_postinst_script(args)
+ find_passphrase(args)
+ find_secure_boot(args)
+
+ args.force = args.force_count > 0
+
+ if args.output_format is None:
+ args.output_format = OutputFormat.raw_gpt
+ else:
+ args.output_format = OutputFormat[args.output_format]
+
+ if args.distribution is not None:
+ args.distribution = Distribution[args.distribution]
+
+ if args.distribution is None or args.release is None:
+ d, r = detect_distribution()
+
+ if args.distribution is None:
+ args.distribution = d
+
+ if args.distribution == d and args.release is None:
+ args.release = r
+
+ if args.distribution is None:
+ die("Couldn't detect distribution.")
+
+ if args.release is None:
+ if args.distribution == Distribution.fedora:
+ args.release = "25"
+        elif args.distribution == Distribution.mageia:
+ args.release = "6"
+ elif args.distribution == Distribution.debian:
+ args.release = "unstable"
+ elif args.distribution == Distribution.ubuntu:
+ args.release = "yakkety"
+ elif args.distribution == Distribution.opensuse:
+ args.release = "tumbleweed"
+
+ find_cache(args)
+
+ if args.mirror is None:
+ if args.distribution == Distribution.fedora:
+ args.mirror = None
+ elif args.distribution == Distribution.debian:
+ args.mirror = "http://deb.debian.org/debian"
+ elif args.distribution == Distribution.ubuntu:
+ args.mirror = "http://archive.ubuntu.com/ubuntu"
+ if platform.machine() == "aarch64":
+ args.mirror = "http://ports.ubuntu.com/"
+ elif args.distribution == Distribution.arch:
+ args.mirror = "https://mirrors.kernel.org/archlinux"
+ if platform.machine() == "aarch64":
+ args.mirror = "http://mirror.archlinuxarm.org"
+ elif args.distribution == Distribution.opensuse:
+ args.mirror = "https://download.opensuse.org"
+
+ if args.bootable:
+ if args.distribution == Distribution.ubuntu:
+ die("Bootable images are currently not supported on Ubuntu.")
+
+ if args.output_format in (OutputFormat.directory, OutputFormat.subvolume, OutputFormat.tar):
+ die("Directory, subvolume and tar images cannot be booted.")
+
+ if args.encrypt is not None:
+ if args.output_format not in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs):
+ die("Encryption is only supported for raw gpt, btrfs or squashfs images.")
+
+ if args.encrypt == "data" and args.output_format == OutputFormat.raw_btrfs:
+ die("'data' encryption mode not supported on btrfs, use 'all' instead.")
+
+ if args.encrypt == "all" and args.verity:
+ die("'all' encryption mode may not be combined with Verity.")
+
+ if args.sign:
+ args.checksum = True
+
+ if args.output is None:
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs):
+ if args.xz:
+ args.output = "image.raw.xz"
+ else:
+ args.output = "image.raw"
+ elif args.output_format == OutputFormat.tar:
+ args.output = "image.tar.xz"
+ else:
+ args.output = "image"
+
+ if args.incremental or args.verb == "clean":
+ args.cache_pre_dev = args.output + ".cache-pre-dev"
+ args.cache_pre_inst = args.output + ".cache-pre-inst"
+ else:
+ args.cache_pre_dev = None
+ args.cache_pre_inst = None
+
+ args.output = os.path.abspath(args.output)
+
+ if args.output_format == OutputFormat.tar:
+ args.xz = True
+
+ if args.output_format == OutputFormat.raw_squashfs:
+ args.read_only = True
+ args.compress = True
+ args.root_size = None
+
+ if args.verity:
+ args.read_only = True
+ args.output_root_hash_file = build_root_hash_file_path(args.output)
+
+ if args.checksum:
+ args.output_checksum = os.path.join(os.path.dirname(args.output), "SHA256SUMS")
+
+ if args.sign:
+ args.output_signature = os.path.join(os.path.dirname(args.output), "SHA256SUMS.gpg")
+
+ if args.nspawn_settings is not None:
+ args.nspawn_settings = os.path.abspath(args.nspawn_settings)
+ args.output_nspawn_settings = build_nspawn_settings_path(args.output)
+
+ if args.build_script is not None:
+ args.build_script = os.path.abspath(args.build_script)
+
+ if args.build_sources is not None:
+ args.build_sources = os.path.abspath(args.build_sources)
+
+ if args.build_dir is not None:
+ args.build_dir = os.path.abspath(args.build_dir)
+
+ if args.postinst_script is not None:
+ args.postinst_script = os.path.abspath(args.postinst_script)
+
+    if args.extra_trees is not None:
+        args.extra_trees = [os.path.abspath(t) for t in args.extra_trees]
+
+ args.root_size = parse_bytes(args.root_size)
+ args.home_size = parse_bytes(args.home_size)
+ args.srv_size = parse_bytes(args.srv_size)
+ args.esp_size = parse_bytes(args.esp_size)
+ args.swap_size = parse_bytes(args.swap_size)
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs) and args.root_size is None:
+ args.root_size = 1024*1024*1024
+
+ if args.bootable and args.esp_size is None:
+ args.esp_size = 256*1024*1024
+
+ args.verity_size = None
+
+ if args.bootable and args.kernel_commandline is None:
+ args.kernel_commandline = "rhgb quiet selinux=0 audit=0 rw"
+
+ if args.secure_boot_key is not None:
+ args.secure_boot_key = os.path.abspath(args.secure_boot_key)
+
+ if args.secure_boot_certificate is not None:
+ args.secure_boot_certificate = os.path.abspath(args.secure_boot_certificate)
+
+ if args.secure_boot:
+ if args.secure_boot_key is None:
+ die("UEFI SecureBoot enabled, but couldn't find private key. (Consider placing it in mkosi.secure-boot.key?)")
+
+ if args.secure_boot_certificate is None:
+ die("UEFI SecureBoot enabled, but couldn't find certificate. (Consider placing it in mkosi.secure-boot.crt?)")
+
+ return args
+
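+# Refuse to run if any of the output artifacts already exists; --force
+# (which removes them up front) is the supported way to overwrite.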
+def check_output(args):
+ for f in (args.output,
+ args.output_checksum if args.checksum else None,
+ args.output_signature if args.sign else None,
+ args.output_nspawn_settings if args.nspawn_settings is not None else None,
+ args.output_root_hash_file if args.verity else None):
+
+ if f is None:
+ continue
+
+ if os.path.exists(f):
+ die("Output file " + f + " exists already. (Consider invocation with --force.)")
+
+def yes_no(b):
+ return "yes" if b else "no"
+
+def format_bytes_or_disabled(sz):
+ if sz is None:
+ return "(disabled)"
+
+ return format_bytes(sz)
+
+def format_bytes_or_auto(sz):
+ if sz is None:
+ return "(automatic)"
+
+ return format_bytes(sz)
+
+def none_to_na(s):
+ return "n/a" if s is None else s
+
+def none_to_no(s):
+ return "no" if s is None else s
+
+def none_to_none(s):
+ return "none" if s is None else s
+
+def line_join_list(l):
+
+ if l is None:
+ return "none"
+
+ return "\n ".join(l)
+
+def print_summary(args):
+ sys.stderr.write("DISTRIBUTION:\n")
+ sys.stderr.write(" Distribution: " + args.distribution.name + "\n")
+ sys.stderr.write(" Release: " + none_to_na(args.release) + "\n")
+ if args.mirror is not None:
+ sys.stderr.write(" Mirror: " + args.mirror + "\n")
+ sys.stderr.write("\nOUTPUT:\n")
+ if args.hostname:
+ sys.stderr.write(" Hostname: " + args.hostname + "\n")
+ sys.stderr.write(" Output Format: " + args.output_format.name + "\n")
+ sys.stderr.write(" Output: " + args.output + "\n")
+ sys.stderr.write(" Output Checksum: " + none_to_na(args.output_checksum if args.checksum else None) + "\n")
+ sys.stderr.write(" Output Signature: " + none_to_na(args.output_signature if args.sign else None) + "\n")
+ sys.stderr.write("Output nspawn Settings: " + none_to_na(args.output_nspawn_settings if args.nspawn_settings is not None else None) + "\n")
+ sys.stderr.write(" Incremental: " + yes_no(args.incremental) + "\n")
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs, OutputFormat.subvolume):
+ sys.stderr.write(" Read-only: " + yes_no(args.read_only) + "\n")
+ if args.output_format in (OutputFormat.raw_btrfs, OutputFormat.subvolume):
+ sys.stderr.write(" FS Compression: " + yes_no(args.compress) + "\n")
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs, OutputFormat.tar):
+ sys.stderr.write(" XZ Compression: " + yes_no(args.xz) + "\n")
+
+ sys.stderr.write(" Encryption: " + none_to_no(args.encrypt) + "\n")
+ sys.stderr.write(" Verity: " + yes_no(args.verity) + "\n")
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs):
+ sys.stderr.write(" Bootable: " + yes_no(args.bootable) + "\n")
+
+ if args.bootable:
+ sys.stderr.write(" Kernel Command Line: " + args.kernel_commandline + "\n")
+ sys.stderr.write(" UEFI SecureBoot: " + yes_no(args.secure_boot) + "\n")
+
+ if args.secure_boot:
+ sys.stderr.write(" UEFI SecureBoot Key: " + args.secure_boot_key + "\n")
+ sys.stderr.write(" UEFI SecureBoot Cert.: " + args.secure_boot_certificate + "\n")
+
+ sys.stderr.write("\nPACKAGES:\n")
+ sys.stderr.write(" Packages: " + line_join_list(args.packages) + "\n")
+
+ if args.distribution in (Distribution.fedora, Distribution.mageia):
+ sys.stderr.write(" With Documentation: " + yes_no(args.with_docs) + "\n")
+
+ sys.stderr.write(" Package Cache: " + none_to_none(args.cache_path) + "\n")
+ sys.stderr.write(" Extra Trees: " + line_join_list(args.extra_trees) + "\n")
+ sys.stderr.write(" Build Script: " + none_to_none(args.build_script) + "\n")
+ sys.stderr.write(" Build Sources: " + none_to_none(args.build_sources) + "\n")
+ sys.stderr.write(" Build Directory: " + none_to_none(args.build_dir) + "\n")
+ sys.stderr.write(" Build Packages: " + line_join_list(args.build_packages) + "\n")
+ sys.stderr.write(" Post Inst. Script: " + none_to_none(args.postinst_script) + "\n")
+ sys.stderr.write(" Scripts with network: " + yes_no(args.with_network) + "\n")
+ sys.stderr.write(" nspawn Settings: " + none_to_none(args.nspawn_settings) + "\n")
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs):
+ sys.stderr.write("\nPARTITIONS:\n")
+ sys.stderr.write(" Root Partition: " + format_bytes_or_auto(args.root_size) + "\n")
+ sys.stderr.write(" Swap Partition: " + format_bytes_or_disabled(args.swap_size) + "\n")
+ sys.stderr.write(" ESP: " + format_bytes_or_disabled(args.esp_size) + "\n")
+ sys.stderr.write(" /home Partition: " + format_bytes_or_disabled(args.home_size) + "\n")
+ sys.stderr.write(" /srv Partition: " + format_bytes_or_disabled(args.srv_size) + "\n")
+
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs, OutputFormat.raw_squashfs, OutputFormat.tar):
+ sys.stderr.write("\nVALIDATION:\n")
+ sys.stderr.write(" Checksum: " + yes_no(args.checksum) + "\n")
+ sys.stderr.write(" Sign: " + yes_no(args.sign) + "\n")
+ sys.stderr.write(" GPG Key: " + ("default" if args.key is None else args.key) + "\n")
+ sys.stderr.write(" Password: " + ("default" if args.password is None else args.password) + "\n")
+
+def reuse_cache_tree(args, workspace, run_build_script, for_cache, cached):
+ """If there's a cached version of this tree around, use it and
+ initialize our new root directly from it. Returns a boolean indicating
+ whether we are now operating on a cached version or not."""
+
+ if cached:
+ return True
+
+ if not args.incremental:
+ return False
+ if for_cache:
+ return False
+ if args.output_format in (OutputFormat.raw_gpt, OutputFormat.raw_btrfs):
+ return False
+
+ fname = args.cache_pre_dev if run_build_script else args.cache_pre_inst
+ if fname is None:
+ return False
+
+ with complete_step('Copying in cached tree ' + fname):
+ try:
+ enumerate_and_copy(fname, os.path.join(workspace, "root"))
+ except FileNotFoundError:
+ return False
+
+ return True
+
+def build_image(args, workspace, run_build_script, for_cache=False):
+
+ # If there's no build script set, there's no point in executing
+ # the build script iteration. Let's quit early.
+ if args.build_script is None and run_build_script:
+ return None, None, None
+
+ raw, cached = reuse_cache_image(args, workspace.name, run_build_script, for_cache)
+ if not cached:
+ raw = create_image(args, workspace.name, for_cache)
+
+ with attach_image_loopback(args, raw) as loopdev:
+
+ prepare_swap(args, loopdev, cached)
+ prepare_esp(args, loopdev, cached)
+
+ luks_format_root(args, loopdev, run_build_script, cached)
+ luks_format_home(args, loopdev, run_build_script, cached)
+ luks_format_srv(args, loopdev, run_build_script, cached)
+
+ with luks_setup_all(args, loopdev, run_build_script) as (encrypted_root, encrypted_home, encrypted_srv):
+
+ prepare_root(args, encrypted_root, cached)
+ prepare_home(args, encrypted_home, cached)
+ prepare_srv(args, encrypted_srv, cached)
+
+ with mount_image(args, workspace.name, loopdev, encrypted_root, encrypted_home, encrypted_srv):
+ prepare_tree(args, workspace.name, run_build_script, cached)
+
+ with mount_cache(args, workspace.name):
+ cached = reuse_cache_tree(args, workspace.name, run_build_script, for_cache, cached)
+ install_distribution(args, workspace.name, run_build_script, cached)
+ install_boot_loader(args, workspace.name, cached)
+
+ install_extra_trees(args, workspace.name, for_cache)
+ install_build_src(args, workspace.name, run_build_script, for_cache)
+ install_build_dest(args, workspace.name, run_build_script, for_cache)
+ set_root_password(args, workspace.name, run_build_script, for_cache)
+ run_postinst_script(args, workspace.name, run_build_script, for_cache)
+
+ reset_machine_id(args, workspace.name, run_build_script, for_cache)
+ make_read_only(args, workspace.name, for_cache)
+
+ squashfs = make_squashfs(args, workspace.name, for_cache)
+ insert_squashfs(args, workspace.name, raw, loopdev, squashfs, for_cache)
+
+ verity, root_hash = make_verity(args, workspace.name, encrypted_root, run_build_script, for_cache)
+ patch_root_uuid(args, loopdev, root_hash, for_cache)
+ insert_verity(args, workspace.name, raw, loopdev, verity, root_hash, for_cache)
+
+ # This time we mount read-only, as we already generated
+ # the verity data, and hence really shouldn't modify the
+ # image anymore.
+ with mount_image(args, workspace.name, loopdev, encrypted_root, encrypted_home, encrypted_srv, root_read_only=True):
+ install_unified_kernel(args, workspace.name, run_build_script, for_cache, root_hash)
+ secure_boot_sign(args, workspace.name, run_build_script, for_cache)
+
+ tar = make_tar(args, workspace.name, run_build_script, for_cache)
+
+ return raw, tar, root_hash
+
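+# Create (if needed) and return a var-tmp staging directory inside the
+# workspace; run_build_script() bind-mounts it to /var/tmp of the container.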
+def var_tmp(workspace):
+
+ var_tmp = os.path.join(workspace, "var-tmp")
+ try:
+ os.mkdir(var_tmp)
+ except FileExistsError:
+ pass
+
+ return var_tmp
+
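+# Run the build script inside the development image with systemd-nspawn.
+# $DESTDIR (/root/dest) collects the build result and, where configured,
+# $SRCDIR points at the sources and $BUILDDIR at a persistent build
+# directory.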
+def run_build_script(args, workspace, raw):
+ if args.build_script is None:
+ return
+
+ with complete_step('Running build script'):
+ dest = os.path.join(workspace, "dest")
+ os.mkdir(dest, 0o755)
+
+ target = "--directory=" + os.path.join(workspace, "root") if raw is None else "--image=" + raw.name
+
+ cmdline = ["systemd-nspawn",
+ '--quiet',
+ target,
+ "--uuid=" + args.machine_id,
+ "--machine=mkosi-" + uuid.uuid4().hex,
+ "--as-pid2",
+ "--register=no",
+ "--bind", dest + ":/root/dest",
+ "--bind=" + var_tmp(workspace) + ":/var/tmp",
+ "--setenv=WITH_DOCS=" + ("1" if args.with_docs else "0"),
+ "--setenv=DESTDIR=/root/dest"]
+
+ if args.build_sources is not None:
+ cmdline.append("--setenv=SRCDIR=/root/src")
+ cmdline.append("--chdir=/root/src")
+
+ if args.read_only:
+ cmdline.append("--overlay=+/root/src::/root/src")
+ else:
+ cmdline.append("--chdir=/root")
+
+ if args.build_dir is not None:
+ cmdline.append("--setenv=BUILDDIR=/root/build")
+ cmdline.append("--bind=" + args.build_dir + ":/root/build")
+
+ if not args.with_network:
+ cmdline.append("--private-network")
+
+ cmdline.append("/root/" + os.path.basename(args.build_script))
+ subprocess.run(cmdline, check=True)
+
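+# Cache images have to be (re-)generated if incremental mode is enabled and
+# either --force was given at least twice or one of the two cache images is
+# missing.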
+def need_cache_images(args):
+
+ if not args.incremental:
+ return False
+
+ if args.force_count > 1:
+ return True
+
+ return not os.path.exists(args.cache_pre_dev) or not os.path.exists(args.cache_pre_inst)
+
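+# Discard the intermediate artifacts of a cache or development build stage;
+# for the final build this is a no-op, since its artifacts are the output.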
+def remove_artifacts(args, workspace, raw, tar, run_build_script, for_cache=False):
+
+ if for_cache:
+ what = "cache build"
+ elif run_build_script:
+ what = "development build"
+ else:
+ return
+
+    if raw is not None:
+        with complete_step("Removing disk image from " + what):
+            # Closing the temporary file actually removes it; merely deleting
+            # the local name would leave the caller's reference alive.
+            raw.close()
+
+    if tar is not None:
+        with complete_step("Removing tar image from " + what):
+            tar.close()
+
+ with complete_step("Removing artifacts from " + what):
+ unlink_try_hard(os.path.join(workspace, "root"))
+ unlink_try_hard(os.path.join(workspace, "var-tmp"))
+
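+# Top-level build orchestration: set up the package cache and workspace,
+# generate the incremental cache images if necessary, run the development
+# build plus the build script, then the final build, and finally link all
+# resulting artifacts into place.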
+def build_stuff(args):
+
+    # Let's define a fixed machine ID for all our build-time runs. We'll
+    # strip it off the final image, but some build-time tools (dracut...)
+    # want a fixed one, so always provide the same one.
+ args.machine_id = uuid.uuid4().hex
+
+ cache = setup_cache(args)
+ workspace = setup_workspace(args)
+
+ # If caching is requested, then make sure we have cache images around we can make use of
+ if need_cache_images(args):
+
+ # Generate the cache version of the build image, and store it as "cache-pre-dev"
+ raw, tar, root_hash = build_image(args, workspace, run_build_script=True, for_cache=True)
+ save_cache(args,
+ workspace.name,
+ raw.name if raw is not None else None,
+ args.cache_pre_dev)
+
+ remove_artifacts(args, workspace.name, raw, tar, run_build_script=True)
+
+ # Generate the cache version of the build image, and store it as "cache-pre-inst"
+ raw, tar, root_hash = build_image(args, workspace, run_build_script=False, for_cache=True)
+ save_cache(args,
+ workspace.name,
+ raw.name if raw is not None else None,
+ args.cache_pre_inst)
+ remove_artifacts(args, workspace.name, raw, tar, run_build_script=False)
+
+    # Run the image builder for the first (development) stage in preparation for the build script
+ raw, tar, root_hash = build_image(args, workspace, run_build_script=True)
+
+ run_build_script(args, workspace.name, raw)
+ remove_artifacts(args, workspace.name, raw, tar, run_build_script=True)
+
+ # Run the image builder for the second (final) stage
+ raw, tar, root_hash = build_image(args, workspace, run_build_script=False)
+
+ raw = xz_output(args, raw)
+ root_hash_file = write_root_hash_file(args, root_hash)
+ settings = copy_nspawn_settings(args)
+ checksum = calculate_sha256sum(args, raw, tar, root_hash_file, settings)
+ signature = calculate_signature(args, checksum)
+
+ link_output(args,
+ workspace.name,
+ raw.name if raw is not None else None,
+ tar.name if tar is not None else None)
+
+ link_output_root_hash_file(args, root_hash_file.name if root_hash_file is not None else None)
+
+ link_output_checksum(args,
+ checksum.name if checksum is not None else None)
+
+ link_output_signature(args,
+ signature.name if signature is not None else None)
+
+ link_output_nspawn_settings(args,
+ settings.name if settings is not None else None)
+
+ if root_hash is not None:
+ print_step("Root hash is {}.".format(root_hash))
+
+def check_root():
+ if os.getuid() != 0:
+ die("Must be invoked as root.")
+
+
+def main():
+ args = load_args()
+
+ if args.verb in ("build", "clean"):
+ check_root()
+ unlink_output(args)
+
+ if args.verb == "build":
+ check_output(args)
+
+ if args.verb in ("build", "summary"):
+ print_summary(args)
+
+ if args.verb == "build":
+ check_root()
+ init_namespace(args)
+ build_stuff(args)
+ print_output_size(args)
+
+if __name__ == "__main__":
+ main()