diff --git a/test/test_build.py b/test/test_build.py
index 449ace58..7edb626f 100644
--- a/test/test_build.py
+++ b/test/test_build.py
@@ -113,11 +113,25 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload):
     password = "password"
     kargs = "systemd.journald.forward_to_console=1"
 
+    container_ref = tc.container_ref
+
+    if tc.sign:
+        image_signer = testutil.ImageSigner()
+        container_ref = image_signer.sign(container_ref)
+        registry_config = image_signer.registry_config
+        gpg_pub_key = image_signer.gpg_pub_key
+        sigstore_dir = image_signer.sigstore_dir
+        image_signer_args = [
+            "-v", f"{registry_config}:{registry_config}",
+            "-v", f"{gpg_pub_key}:{gpg_pub_key}",
+            "-v", f"{sigstore_dir}:{sigstore_dir}",
+        ]
+
     # params can be long and the qmp socket (that has a limit of 100ish
     # AF_UNIX) is derived from the path
     # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to
     # different image type combinations
-    output_path = shared_tmpdir / format(abs(hash(tc.container_ref + str(tc.target_arch))), "x")
+    output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.target_arch))), "x")
     output_path.mkdir(exist_ok=True)
 
     # make sure that the test store exists, because podman refuses to start if the source directory for a volume
@@ -164,7 +178,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload):
             bib_output = bib_output_path.read_text(encoding="utf8")
             results.append(ImageBuildResult(
                 image_type, generated_img, tc.target_arch, tc.osinfo_template,
-                tc.container_ref, tc.rootfs, username, password,
+                container_ref, tc.rootfs, username, password,
                 ssh_keyfile_private_path, kargs, bib_output, journal_output))
 
     # generate new keyfile
@@ -257,15 +271,19 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload):
     if tc.local:
         cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"])
 
+    if tc.sign:
+        cmd.extend(image_signer_args)
+
     cmd.extend([
         *creds_args,
         build_container,
-        tc.container_ref,
+        container_ref,
         *types_arg,
         *upload_args,
         *target_arch_args,
         *tc.bib_rootfs_args(),
         "--local" if tc.local else "--local=false",
+        "--tls-verify=false" if tc.sign else "--tls-verify=true"
     ])
 
     # print the build command for easier tracing
@@ -299,7 +317,7 @@ def del_ami():
     for image_type in image_types:
         results.append(ImageBuildResult(
             image_type, artifact[image_type], tc.target_arch, tc.osinfo_template,
-            tc.container_ref, tc.rootfs, username, password,
+            container_ref, tc.rootfs, username, password,
             ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata))
 
     yield results
@@ -316,7 +334,7 @@ def del_ami():
            img.unlink()
         else:
            print("does not exist")
-    subprocess.run(["podman", "rmi", tc.container_ref], check=False)
+    subprocess.run(["podman", "rmi", container_ref], check=False)
     return
 
 
@@ -326,6 +344,11 @@ def test_container_builds(build_container):
     assert build_container in output
 
 
+@pytest.mark.parametrize("image_type", gen_testcases("signed"), indirect=["image_type"])
+def test_signed_images(image_type):
+    assert image_type.img_path.exists(), "output file missing, dir "\
+        f"content: {os.listdir(os.fspath(image_type.img_path))}"
+
 @pytest.mark.parametrize("image_type", gen_testcases("multidisk"), indirect=["image_type"])
 def test_image_is_generated(image_type):
     assert image_type.img_path.exists(), "output file missing, dir "\
diff --git a/test/testcases.py b/test/testcases.py
index f6425d5e..10d71803 100644
--- a/test/testcases.py
+++ b/test/testcases.py
@@ -23,6 +23,12 @@ class TestCase:
     # rootfs to use (e.g. ext4), some containers like fedora do not
     # have a default rootfs. If unset the container default is used.
     rootfs: str = ""
+    # osinfo_template is a string template describing the OS detected by
+    # 'osinfo-detect'. It can contain '{arch}' that will be replaced with the
+    # actual container image arch
+    osinfo_template: str = ""
+    # Sign the container_ref and use the new signed image instead of the original one
+    sign: bool = False
 
     def bib_rootfs_args(self):
         if self.rootfs:
@@ -93,6 +99,15 @@ def gen_testcases(what):  # pylint: disable=too-many-return-statements
         for klass in (TestCaseCentos, TestCaseFedora)
         for img in CLOUD_BOOT_IMAGE_TYPES + DISK_IMAGE_TYPES + ["anaconda-iso"]
     ]
+    if what == "signed":
+        return [
+            klass(image=img, local=False, sign=True)
+            for klass in (TestCaseCentos, TestCaseFedora)
+            # We can add CLOUD_BOOT_IMAGE_TYPES + DISK_IMAGE_TYPES below
+            # once https://github.com/osbuild/images/pull/990 and
+            # https://github.com/osbuild/osbuild/pull/1906 are merged
+            for img in ["anaconda-iso"]
+        ]
     if what == "multidisk":
         # single test that specifies all image types
         image = "+".join(DISK_IMAGE_TYPES)
diff --git a/test/testutil.py b/test/testutil.py
index 6033327b..6b0e9395 100644
--- a/test/testutil.py
+++ b/test/testutil.py
@@ -4,6 +4,7 @@
 import shutil
 import socket
 import subprocess
+import tempfile
 import time
 
 import boto3
@@ -147,3 +148,82 @@ def create_filesystem_customizations(rootfs: str):
     "-v", "/var/lib/containers/storage:/var/lib/containers/storage",
     "--security-opt", "label=type:unconfined_t",
 ]
+
+
+class ImageSigner():
+    sigstore_dir = "/var/lib/containers/sigstore"
+    registry_config = "/etc/containers/registries.d/localhost.yaml"
+    gpg_email = "bootc-image-builder@redhat.com"
+    gpg_pub_key = "/etc/pki/rpm-gpg/RPM-GPG-KEY-bootc-image-builder"
+    gpg_passphrase = "redhat"
+
+    def __init__(self):
+        default_route = subprocess.run([
+            "ip",
+            "route",
+            "list",
+            "default"
+        ], check=True, capture_output=True).stdout
+        default_ip = default_route.split()[8].decode("utf-8")
+        self.local_registry = f"{default_ip}:5000"
+
+    def sign(self, container_ref):
+        if not os.path.exists(self.gpg_pub_key):
+            subprocess.run([
+                "gpg",
+                "--quick-gen-key",
+                "--batch",
+                "--passphrase", self.gpg_passphrase,
+                self.gpg_email
+            ], check=True)
+            subprocess.run([
+                "gpg",
+                "--output", self.gpg_pub_key,
+                "--armor",
+                "--export",
+                self.gpg_email
+            ], check=True)
+            subprocess.run([
+                "podman", "image", "trust", "set",
+                "--pubkeysfile", self.gpg_pub_key,
+                "--type", "signedBy",
+                self.local_registry
+            ], check=True)
+
+        registry_lookaside_config = f"""docker:
+    {self.local_registry}:
+        lookaside: file://{self.sigstore_dir}
+        lookaside-staging: file://{self.sigstore_dir}
+"""
+        with open(self.registry_config, mode="w") as registry_config_file:
+            registry_config_file.write(registry_lookaside_config)
+
+        registry_container_name = subprocess.run([
+            "podman", "ps", "--filter", "name=registry", "--format", "{{.Names}}"
+        ], check=True, capture_output=True).stdout.strip()
+
+        if registry_container_name != b"registry":
+            subprocess.run([
+                "podman", "run", "-d",
+                "-p", "5000:5000",
+                "--restart", "always",
+                "--name", "registry",
+                "registry:2"
+            ], check=True)
+
+        container_ref_path = container_ref[container_ref.index('/'):]
+        signed_container_ref = f"{self.local_registry}{container_ref_path}"
+        with tempfile.NamedTemporaryFile(mode="w") as gpg_passphrase_file:
+            gpg_passphrase_file.write(self.gpg_passphrase)
+            gpg_passphrase_file.flush()
+            subprocess.run([
+                "skopeo", "copy",
+                "--dest-tls-verify=false",
+                "--remove-signatures",
+                "--sign-by", self.gpg_email,
+                "--sign-passphrase-file", gpg_passphrase_file.name,
+                "docker://{}".format(container_ref),
+                "docker://{}".format(signed_container_ref),
+            ], check=True)
+
+        return signed_container_ref