From a28961579d9b59eaa8fb4fab3f846acd3e601c9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ivan=20Miri=C4=87?=
Date: Tue, 2 Mar 2021 14:45:35 +0100
Subject: [PATCH 01/24] Add package repository scripts, run in CI

This is part of the effort to abandon Bintray[0] in favor of a
self-hosted package repository solution. The current approach involves
pushing the packages and repository metadata to an S3 bucket, which
will eventually be served from a CDN (CloudFront).

Part of #1247

[0]: https://jfrog.com/blog/into-the-sunset-bintray-jcenter-gocenter-and-chartcenter/
---
 .github/workflows/all.yml        |  25 +-
 .gitignore                       |   5 +-
 packaging/Dockerfile             |  21 ++
 packaging/bin/create-deb-repo.sh |  80 ++++++
 packaging/bin/create-msi-repo.sh |  31 ++
 packaging/bin/create-rpm-repo.sh |  49 ++++
 packaging/bin/entrypoint.sh      |  38 +++
 packaging/bin/generate_index.py  | 468 +++++++++++++++++++++++++++++++
 packaging/docker-compose.yml     |  14 +
 9 files changed, 720 insertions(+), 11 deletions(-)
 create mode 100644 packaging/Dockerfile
 create mode 100755 packaging/bin/create-deb-repo.sh
 create mode 100755 packaging/bin/create-msi-repo.sh
 create mode 100755 packaging/bin/create-rpm-repo.sh
 create mode 100755 packaging/bin/entrypoint.sh
 create mode 100755 packaging/bin/generate_index.py
 create mode 100644 packaging/docker-compose.yml

diff --git a/.github/workflows/all.yml b/.github/workflows/all.yml
index dd77f6e474c1..d2d30a699e21 100644
--- a/.github/workflows/all.yml
+++ b/.github/workflows/all.yml
@@ -366,13 +366,15 @@ jobs:
           done
           hub release create "${assets[@]}" -m "$VERSION" -m "$(cat ./release\ notes/${VERSION}.md)" "$VERSION"
 
-  publish-bintray:
+  publish-packages:
     runs-on: ubuntu-latest
    needs: [configure, build, package-windows]
    if: startsWith(github.ref, 'refs/tags/v')
    env:
      VERSION: ${{ needs.configure.outputs.version }}
    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
      - name: Download binaries
        uses: actions/download-artifact@v2
        with:
          name: binaries
          path: dist
      - name: Download Windows binaries
        uses: actions/download-artifact@v2
        with:
          name: binaries-windows
          path: dist
-      - name: Upload packages to Bintray
+      - name: Setup docker-compose environment
+        run: |
+          cat > packaging/.env <<EOF
+          AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          PGP_SIGN_KEY_PASSPHRASE=${{ secrets.PGP_SIGN_KEY_PASSPHRASE }}
+          EOF
+          echo "${{ secrets.PGP_SIGN_KEY }}" | base64 -d > packaging/sign-key.gpg
+      - name: Publish packages
        run: |
-          curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-amd64.deb" \
-            "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/deb/k6/${VERSION#v}/k6-${VERSION}-amd64.deb;deb_distribution=stable;deb_component=main;deb_architecture=amd64;publish=1;override=1"
-          curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-amd64.rpm" \
-            "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/rpm/k6/${VERSION#v}/k6-${VERSION}-amd64.rpm?publish=1&override=1"
-          curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-win64.msi" \
-            "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/windows/k6/${VERSION#v}/k6-${VERSION}-amd64.msi?publish=1&override=1"
-          curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6.portable.${VERSION#v}.nupkg" \
-            "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/choco/k6.portable/${VERSION#v}/k6.portable.${VERSION}.nupkg?publish=1&override=1"
+          cd packaging
+          # TODO: Build and publish k6io/packager image to GH Container Registry
+          docker-compose run --rm packager
diff --git a/.gitignore b/.gitignore
index 6a7de5cfdd52..0f16c71b00e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,4 +24,7 @@
 !/vendor/modules.txt
 /vendor/**/*.y*ml
 /vendor/**/.*.y*ml
-/vendor/github.com/dlclark/regexp2/testoutput1
\ No newline at end of file
+/vendor/github.com/dlclark/regexp2/testoutput1
+
+/packaging/.env
+/packaging/*.gpg
diff --git a/packaging/Dockerfile b/packaging/Dockerfile
new file mode 100644
index 000000000000..513ddb6ae93b
--- /dev/null
+++ b/packaging/Dockerfile
@@ -0,0 +1,21 @@
+FROM debian:buster-20210311
+
+LABEL maintainer="k6 Developers "
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update -y && \
+    apt-get install -y apt-utils createrepo curl git gnupg2 python3-pip
+
+RUN pip3 install s3cmd
+
+RUN addgroup --gid 1000 k6 && \
+    useradd --create-home --shell /bin/bash --no-log-init \
+        --uid 1000 --gid 1000 k6
+
+COPY bin/ /usr/local/bin/
+
+USER k6
+WORKDIR /home/k6
+
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/packaging/bin/create-deb-repo.sh b/packaging/bin/create-deb-repo.sh
new file mode 100755
index 000000000000..8548e1814272
--- /dev/null
+++ b/packaging/bin/create-deb-repo.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+set -eEuo pipefail
+
+# External dependencies:
+# - https://salsa.debian.org/apt-team/apt (apt-ftparchive, packaged in apt-utils)
+# - https://github.com/s3tools/s3cmd
+#   s3cmd expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the
+#   environment.
+# - https://gnupg.org/
+#   For signing, the script expects the private signing key to already be
+#   imported and PGPKEYID and PGP_SIGN_KEY_PASSPHRASE to be set in the
+#   environment.
+
+_s3bucket="${S3_BUCKET-dl-k6-io}"
+_usage="Usage: $0 <pkgdir> <repodir> [s3bucket=${_s3bucket}]"
+PKGDIR="${1?${_usage}}"  # The directory where .deb files are located
+REPODIR="${2?${_usage}}" # The package repository working directory
+S3PATH="${3-${_s3bucket}}/deb"
+
+# We don't publish i386 packages, but the repo structure is needed for
+# compatibility on some systems. See https://unix.stackexchange.com/a/272916 .
+architectures="amd64 i386"
+# TODO: Replace with CDN URL
+#repobaseurl="https://dl.k6.io/deb"
+repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/deb"
+
+# TODO: Remove old package versions?
+# Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25
+
+mkdir -p "$REPODIR" && cd "$_"
+
+for arch in $architectures; do
+  bindir="dists/stable/main/binary-$arch"
+  mkdir -p "$bindir"
+  # Download existing packages via the CDN to avoid S3 egress costs.
+  # An optimization might be to just append to the Packages file and upload it
+  # and the new package only, but updating the index.html would get messy and
+  # would be inconsistent with the RPM script which does require all packages to
+  # be present because of how createrepo works.
+  # TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks...
+  files=$(s3cmd ls "s3://${S3PATH}/${bindir}/" | { grep -oP "(?<=/${S3PATH}/).*\.(deb|asc)" || true; })
+  # curl supports parallel downloads with the -Z option since v7.68.0, but
+  # unfortunately Debian carries an older version, hence xargs.
+  echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLR -o "{}" "$repobaseurl/{}"
+
+  # Copy the new packages in
+  find "$PKGDIR" -name "*$arch*.deb" -type f -print0 | xargs -r0 cp -t "$bindir"
+  # Generate signatures for files that don't have one.
+  # TODO: Switch to debsign instead? This is currently done the way Bintray
+  # did it, but the signature is not validated by apt/dpkg.
+  # https://blog.packagecloud.io/eng/2014/10/28/howto-gpg-sign-verify-deb-packages-apt-repositories/
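For reference, a detached signature like the ones generated here can be spot-checked by hand. This is a sketch with illustrative file names, assuming the public key is already in the verifier's keyring; as the comment above notes, apt itself only validates the signed Release/InRelease files produced further down:

```bash
# Spot-check one package's detached, ASCII-armored signature.
gpg2 --verify k6-v0.31.0-amd64.deb.asc k6-v0.31.0-amd64.deb
```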
+  find "$bindir" -type f -name '*.deb' -print0 | while read -r -d $'\0' f; do
+    if ! [ -r "${f}.asc" ]; then
+      gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \
+        --pinentry-mode=loopback --yes --detach-sign --armor -o "${f}.asc" "$f"
+    fi
+  done
+  apt-ftparchive packages "$bindir" | tee "$bindir/Packages"
+  gzip -fk "$bindir/Packages"
+  bzip2 -fk "$bindir/Packages"
+done
+
+echo "Creating release file..."
+apt-ftparchive release \
+  -o APT::FTPArchive::Release::Origin="k6" \
+  -o APT::FTPArchive::Release::Label="k6" \
+  -o APT::FTPArchive::Release::Suite="stable" \
+  -o APT::FTPArchive::Release::Codename="stable" \
+  -o APT::FTPArchive::Release::Architectures="$architectures" \
+  -o APT::FTPArchive::Release::Components="main" \
+  -o APT::FTPArchive::Release::Date="$(date -Ru)" \
+  "dists/stable" > "dists/stable/Release"
+
+# Sign release file
+gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \
+  --pinentry-mode=loopback --yes --detach-sign --armor \
+  -o "dists/stable/Release.gpg" "dists/stable/Release"
+gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \
+  --pinentry-mode=loopback --yes --clear-sign \
+  -o "dists/stable/InRelease" "dists/stable/Release"
diff --git a/packaging/bin/create-msi-repo.sh b/packaging/bin/create-msi-repo.sh
new file mode 100755
index 000000000000..c0d49cca6394
--- /dev/null
+++ b/packaging/bin/create-msi-repo.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -eEuo pipefail
+
+# External dependencies:
+# - https://github.com/s3tools/s3cmd
+#   s3cmd expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the
+#   environment.
+
+_s3bucket="${S3_BUCKET-dl-k6-io}"
+_usage="Usage: $0 <pkgdir> <repodir> [s3bucket=${_s3bucket}]"
+PKGDIR="${1?${_usage}}"  # The directory where .msi files are located
+REPODIR="${2?${_usage}}" # The package repository working directory
+S3PATH="${3-${_s3bucket}}/msi"
+
+# TODO: Replace with CDN URL
+#repobaseurl="https://dl.k6.io/msi"
+repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/msi"
+
+# TODO: Remove old package versions?
+# Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25
+
+mkdir -p "$REPODIR" && cd "$_"
+
+# Download existing packages via the CDN to avoid S3 egress costs.
+# For MSIs this is just needed to generate the index correctly.
+# TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks...
+files=$(s3cmd ls "s3://${S3PATH}/" | { grep -oP "(?<=/${S3PATH}/).*\.msi" || true; })
+# curl supports parallel downloads with the -Z option since v7.68.0, but
+# unfortunately Debian carries an older version, hence xargs.
+echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLOR "$repobaseurl/{}"
+find "$PKGDIR" -name "*.msi" -type f -print0 | xargs -r0 cp -t "$REPODIR"
diff --git a/packaging/bin/create-rpm-repo.sh b/packaging/bin/create-rpm-repo.sh
new file mode 100755
index 000000000000..9dd84ebd685e
--- /dev/null
+++ b/packaging/bin/create-rpm-repo.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+set -eEuo pipefail
+
+# External dependencies:
+# - https://github.com/rpm-software-management/createrepo
+# - https://github.com/s3tools/s3cmd
+#   s3cmd expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the
+#   environment.
+# - https://gnupg.org/
+#   For signing, the script expects the private signing key to already be
+#   imported and the `rpm` command configured for signing, e.g. ~/.rpmmacros
+#   should exist.
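For context, the signing configuration this script expects could be prepared roughly as below. This is a sketch, not part of the patch; the key name "k6" and the gpg2 path are assumed values:

```bash
# Sketch: minimal rpm signing setup for an already-imported GPG key,
# so that `rpm --addsign` works non-interactively.
cat > "$HOME/.rpmmacros" <<EOF
%_signature gpg
%_gpg_name k6
%_gpgbin /usr/bin/gpg2
EOF
```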
+
+_s3bucket="${S3_BUCKET-dl-k6-io}"
+_usage="Usage: $0 <pkgdir> <repodir> [s3bucket=${_s3bucket}]"
+PKGDIR="${1?${_usage}}"  # The directory where .rpm files are located
+REPODIR="${2?${_usage}}" # The package repository working directory
+S3PATH="${3-${_s3bucket}}/rpm"
+
+architectures="x86_64"
+# TODO: Replace with CDN URL
+#repobaseurl="https://dl.k6.io/rpm"
+repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/rpm"
+
+# TODO: Remove old package versions?
+# Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25
+
+mkdir -p "$REPODIR" && cd "$_"
+
+for arch in $architectures; do
+  mkdir -p "$arch" && cd "$_"
+  # Download existing packages via the CDN to avoid S3 egress costs.
+  # TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks...
+  files=$(s3cmd ls "s3://${S3PATH}/${arch}/" | { grep -oP "(?<=/${S3PATH}/).*\.rpm" || true; })
+  # curl supports parallel downloads with the -Z option since v7.68.0, but
+  # unfortunately Debian carries an older version, hence xargs.
+  echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLOR "$repobaseurl/{}"
+
+  # Copy the new packages in and generate signatures
+  # FIXME: The architecture naming used by yum docs and in public RPM repos is
+  # "x86_64", whereas our packages are named with "amd64". So we do a replacement
+  # here, but we should probably consider naming them with "x86_64" instead.
+  find "$PKGDIR" -name "*${arch/x86_64/amd64}*.rpm" -type f -print0 | while read -r -d $'\0' f; do
+    cp -av "$f" "$PWD/"
+    rpm --addsign "${f##*/}"
+  done
+  createrepo .
+  cd -
+done
diff --git a/packaging/bin/entrypoint.sh b/packaging/bin/entrypoint.sh
new file mode 100755
index 000000000000..cb4c010c742a
--- /dev/null
+++ b/packaging/bin/entrypoint.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -eEuo pipefail
+
+log() {
+    echo "$(date -Iseconds) $*"
+}
+
+signkeypath="$PWD/sign-key.gpg"
+
+if ! [ -r "$signkeypath" ]; then
+    log "ERROR: Signing key not found at '$signkeypath'"
+    exit 1
+fi
+
+gpg2 --import --batch --passphrase="$PGP_SIGN_KEY_PASSPHRASE" "$signkeypath"
+export PGPKEYID="$(gpg2 --list-secret-keys --with-colons | grep '^sec' | cut -d: -f5)"
+
+# Setup RPM signing
+cat > "$HOME/.rpmmacros" <<EOF
+[.rpmmacros body lost in extraction: rpm signing macros selecting gpg2 and
+the imported $PGPKEYID key]
+EOF
+
+[repository build steps lost in extraction: the create-deb-repo.sh,
+create-msi-repo.sh and create-rpm-repo.sh scripts are invoked here against
+"$PWD/dist", producing the dl.k6.io/ tree indexed below]
+
+log "Generating index.html ..."
+(cd dl.k6.io && generate_index.py -r)
+
+log "Syncing to S3 ..."
+s3cmd sync ./dl.k6.io/ "s3://${S3_BUCKET-dl-k6-io}/"
+
+exec "$@"
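Once this entrypoint has synced a release, a Debian/Ubuntu user could point apt at the result roughly as follows. This is a sketch only: the dl.k6.io URL is still marked as a TODO above, and the signing key would also need to be fetched and trusted (publishing it is itself only a TODO in a later patch):

```bash
# Sketch: consume the generated deb repo; the URL and key distribution
# are assumptions, matching the dists/stable/main layout built above.
echo "deb https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list
sudo apt-get update && sudo apt-get install k6
```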
diff --git a/packaging/bin/generate_index.py b/packaging/bin/generate_index.py
new file mode 100755
index 000000000000..085f07abe5f3
--- /dev/null
+++ b/packaging/bin/generate_index.py
@@ -0,0 +1,468 @@
+#!/usr/bin/env python3
+# ---
+# Copyright 2020 glowinthedark
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#
+# See the License for the specific language governing permissions and limitations under the License.
+# ---
+
+import argparse
+import datetime
+import os
+import sys
+from pathlib import Path
+
+def process_dir(top_dir, opts):
+[start of process_dir lost in extraction: it derives glob_patt from
+opts.filter, opens index_file_name ("index.html") in path_top_dir for
+writing as index_file, and writes the HTML page header: doctype, inline
+CSS, a title built from]
+""" + f'{path_top_dir.name}' + """
+[and a table opening with Name / Size / Modified column headings and a
+".." parent-directory link row]
+
+    # sort dirs first
+    sorted_entries = sorted(path_top_dir.glob(glob_patt), key=lambda p: (p.is_file(), p.name))
+
+    entry: Path
+    for entry in sorted_entries:
+
+        # don't include index.html in the file listing
+        if entry.name.lower() == index_file_name.lower():
+            continue
+
+        if entry.is_dir() and opts.recursive:
+            process_dir(entry, opts)
+
+        # From Python 3.6, os.access() accepts path-like objects
+        if (not entry.is_symlink()) and not os.access(str(entry), os.W_OK):
+            print(f"*** WARNING *** entry {entry.absolute()} is not writable! SKIPPING!")
+            continue
+        if opts.verbose:
+            print(f'{entry.absolute()}')
+
+        size_bytes = -1  ## is a folder
+        size_pretty = '—'
+        last_modified = '-'
+        last_modified_human_readable = '-'
+        last_modified_iso = ''
+        try:
+            if entry.is_file():
+                size_bytes = entry.stat().st_size
+                size_pretty = pretty_size(size_bytes)
+
+            if entry.is_dir() or entry.is_file():
+                last_modified = datetime.datetime.fromtimestamp(entry.stat().st_mtime).replace(microsecond=0)
+                last_modified_iso = last_modified.isoformat()
+                last_modified_human_readable = last_modified.strftime("%c")
+
+        except Exception as e:
+            print('ERROR accessing file name:', e, entry)
+            continue
+
+        entry_path = str(entry.name)
+
+        if entry.is_dir() and not entry.is_symlink():
+            entry_type = 'folder'
+            entry_path = os.path.join(entry.name, '')
+
+        elif entry.is_dir() and entry.is_symlink():
+            entry_type = 'folder-shortcut'
+            print('dir-symlink', entry.absolute())
+
+        elif entry.is_file() and entry.is_symlink():
+            entry_type = 'file-shortcut'
+            print('file-symlink', entry.absolute())
+
+        else:
+            entry_type = 'file'
+
+        index_file.write(f"""
+[table-row markup lost in extraction: a row of class {entry_type} linking
+{entry_path}, showing {entry.name}, {size_pretty} and the
+{last_modified_iso} / {last_modified_human_readable} timestamps]
+""")
+
+    index_file.write("""
+[closing table/body/html markup lost in extraction]
+ +""") + if index_file: + index_file.close() + + +# bytes pretty-printing +UNITS_MAPPING = [ + (1024 ** 5, ' PB'), + (1024 ** 4, ' TB'), + (1024 ** 3, ' GB'), + (1024 ** 2, ' MB'), + (1024 ** 1, ' KB'), + (1024 ** 0, (' byte', ' bytes')), +] + + +def pretty_size(bytes, units=UNITS_MAPPING): + """Human-readable file sizes. + + ripped from https://pypi.python.org/pypi/hurry.filesize/ + """ + for factor, suffix in units: + if bytes >= factor: + break + amount = int(bytes / factor) + + if isinstance(suffix, tuple): + singular, multiple = suffix + if amount == 1: + suffix = singular + else: + suffix = multiple + return str(amount) + suffix + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='''DESCRIPTION: + Generate directory index files (recursive is OFF by default). + Start from current dir or from folder passed as first positional argument. + Optionally filter by file types with --filter "*.py". ''') + + parser.add_argument('top_dir', + nargs='?', + action='store', + help='top folder from which to start generating indexes, ' + 'use current folder if not specified', + default=os.getcwd()) + + parser.add_argument('--filter', '-f', + help='only include files matching glob', + required=False) + + parser.add_argument('--recursive', '-r', + action='store_true', + help="recursively process nested dirs (FALSE by default)", + required=False) + + parser.add_argument('--verbose', '-v', + action='store_true', + help='***WARNING: this can take a very long time with complex file tree structures***' + ' verbosely list every processed file', + required=False) + + config = parser.parse_args(sys.argv[1:]) + process_dir(config.top_dir, config) diff --git a/packaging/docker-compose.yml b/packaging/docker-compose.yml new file mode 100644 index 000000000000..1ecbbe59e71b --- /dev/null +++ b/packaging/docker-compose.yml @@ -0,0 +1,14 @@ +version: '3.4' + +services: + packager: + build: . + image: k6io/packager:latest + environment: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - PGP_SIGN_KEY_PASSPHRASE + - S3_BUCKET=test-dl-k6-io + volumes: + - ../dist:/home/k6/dist + - ./sign-key.gpg:/home/k6/sign-key.gpg From a17983950988fb32d76bc7f370f446f7b2a8e1a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Mar 2021 14:56:25 +0100 Subject: [PATCH 02/24] Disable CloudFront caching for repo metadata files --- packaging/bin/entrypoint.sh | 16 +++++++++++++++- packaging/docker-compose.yml | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/packaging/bin/entrypoint.sh b/packaging/bin/entrypoint.sh index cb4c010c742a..8eba563844ed 100755 --- a/packaging/bin/entrypoint.sh +++ b/packaging/bin/entrypoint.sh @@ -6,6 +6,7 @@ log() { } signkeypath="$PWD/sign-key.gpg" +s3bucket="${S3_BUCKET-dl-k6-io}" if ! [ -r "$signkeypath" ]; then log "ERROR: Signing key not found at '$signkeypath'" @@ -33,6 +34,19 @@ log "Generating index.html ..." (cd dl.k6.io && generate_index.py -r) log "Syncing to S3 ..." -s3cmd sync ./dl.k6.io/ "s3://${S3_BUCKET-dl-k6-io}/" +s3cmd sync ./dl.k6.io/ "s3://${s3bucket}/" + +# Disable cache for repo metadata, so that new releases will be available +# immediately. +# TODO: Maybe do this inside each script? +# TODO: How to handle k6-latest-amd64.msi? Could it be an S3 redirect that is never cached? 
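One shape the never-cached "latest" redirect idea could take is sketched below, assuming S3 static website hosting; the object key and target version are illustrative, and the diff hunk continues after the sketch:

```bash
# Sketch: a zero-byte object whose only job is to redirect to the newest MSI.
# S3 website endpoints honor x-amz-website-redirect-location.
touch k6-latest-amd64.msi
s3cmd put k6-latest-amd64.msi "s3://${s3bucket}/msi/k6-latest-amd64.msi" \
    --add-header="x-amz-website-redirect-location:/msi/k6-v0.31.0-amd64.msi" \
    --add-header="Cache-Control:no-cache, max-age=0"
```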
+s3cmd modify --add-header="Cache-Control:no-cache, max-age=0" \ + "s3://${s3bucket}/deb/dists/stable/"{Release,Release.gpg,InRelease} +s3cmd modify --add-header="Cache-Control:no-cache, max-age=0" \ + "s3://${s3bucket}/deb/dists/stable/main/binary-amd64"/Packages{,.gz,.bz2} +s3cmd --recursive modify --add-header="Cache-Control:no-cache, max-age=0" \ + "s3://${s3bucket}/rpm/x86_64/repodata" +s3cmd modify --recursive --exclude='*' --include='index.html' \ + --add-header='Cache-Control:no-cache, max-age=0' "s3://${s3bucket}/" exec "$@" diff --git a/packaging/docker-compose.yml b/packaging/docker-compose.yml index 1ecbbe59e71b..601e2b371732 100644 --- a/packaging/docker-compose.yml +++ b/packaging/docker-compose.yml @@ -8,7 +8,7 @@ services: - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY - PGP_SIGN_KEY_PASSPHRASE - - S3_BUCKET=test-dl-k6-io + - S3_BUCKET=dl.staging.k6.io volumes: - ../dist:/home/k6/dist - ./sign-key.gpg:/home/k6/sign-key.gpg From b4cd8ea811bfde8515bc97236276507f79a20450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Mar 2021 15:48:25 +0100 Subject: [PATCH 03/24] Sync remote to local with s3cmd --- packaging/bin/create-deb-repo.sh | 16 +++------------- packaging/bin/create-msi-repo.sh | 17 +++++------------ packaging/bin/create-rpm-repo.sh | 12 +++--------- 3 files changed, 11 insertions(+), 34 deletions(-) diff --git a/packaging/bin/create-deb-repo.sh b/packaging/bin/create-deb-repo.sh index 8548e1814272..4c5d3e32da60 100755 --- a/packaging/bin/create-deb-repo.sh +++ b/packaging/bin/create-deb-repo.sh @@ -20,9 +20,6 @@ S3PATH="${3-${_s3bucket}}/deb" # We don't publish i386 packages, but the repo structure is needed for # compatibility on some systems. See https://unix.stackexchange.com/a/272916 . architectures="amd64 i386" -# TODO: Replace with CDN URL -#repobaseurl="https://dl.k6.io/deb" -repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/deb" # TODO: Remove old package versions? # Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25 @@ -32,16 +29,9 @@ mkdir -p "$REPODIR" && cd "$_" for arch in $architectures; do bindir="dists/stable/main/binary-$arch" mkdir -p "$bindir" - # Download existing packages via the CDN to avoid S3 egress costs. - # An optimization might be to just append to the Packages file and upload it - # and the new package only, but updating the index.html would get messy and - # would be inconsistent with the RPM script which does require all packages to - # be present because of how createrepo works. - # TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks... - files=$(s3cmd ls "s3://${S3PATH}/${bindir}/" | { grep -oP "(?<=/${S3PATH}/).*\.(deb|asc)" || true; }) - # curl supports parallel downloads with the -Z option since v7.68.0, but - # unfortunately Debian carries an older version, hence xargs. 
- echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLR -o "{}" "$repobaseurl/{}" + # Download existing files + s3cmd sync --exclude='*' --include='*.deb' --include='*.asc' \ + "s3://${S3PATH}/${bindir}/" "$bindir/" # Copy the new packages in find "$PKGDIR" -name "*$arch*.deb" -type f -print0 | xargs -r0 cp -t "$bindir" diff --git a/packaging/bin/create-msi-repo.sh b/packaging/bin/create-msi-repo.sh index c0d49cca6394..f3d28f3d26c0 100755 --- a/packaging/bin/create-msi-repo.sh +++ b/packaging/bin/create-msi-repo.sh @@ -12,20 +12,13 @@ PKGDIR="${1?${_usage}}" # The directory where .msi files are located REPODIR="${2?${_usage}}" # The package repository working directory S3PATH="${3-${_s3bucket}}/msi" -# TODO: Replace with CDN URL -#repobaseurl="https://dl.k6.io/msi" -repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/msi" - # TODO: Remove old package versions? # Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25 -mkdir -p "$REPODIR" && cd "$_" +mkdir -p "$REPODIR" + +# Download existing packages +s3cmd sync --exclude='*' --include='*.msi' "s3://${S3PATH}/" "$REPODIR/" -# Download existing packages via the CDN to avoid S3 egress costs. -# For MSIs this is just needed to generate the index correctly. -# TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks... -files=$(s3cmd ls "s3://${S3PATH}/" | { grep -oP "(?<=/${S3PATH}/).*\.msi" || true; }) -# curl supports parallel downloads with the -Z option since v7.68.0, but -# unfortunately Debian carries an older version, hence xargs. -echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLOR "$repobaseurl/{}" +# Copy the new packages in find "$PKGDIR" -name "*.msi" -type f -print0 | xargs -r0 cp -t "$REPODIR" diff --git a/packaging/bin/create-rpm-repo.sh b/packaging/bin/create-rpm-repo.sh index 9dd84ebd685e..1aea04626a6f 100755 --- a/packaging/bin/create-rpm-repo.sh +++ b/packaging/bin/create-rpm-repo.sh @@ -18,9 +18,6 @@ REPODIR="${2?${_usage}}" # The package repository working directory S3PATH="${3-${_s3bucket}}/rpm" architectures="x86_64" -# TODO: Replace with CDN URL -#repobaseurl="https://dl.k6.io/rpm" -repobaseurl="http://test-dl-k6-io.s3-website.eu-north-1.amazonaws.com/rpm" # TODO: Remove old package versions? # Something like: https://github.com/kopia/kopia/blob/master/tools/apt-publish.sh#L23-L25 @@ -29,12 +26,9 @@ mkdir -p "$REPODIR" && cd "$_" for arch in $architectures; do mkdir -p "$arch" && cd "$_" - # Download existing packages via the CDN to avoid S3 egress costs. - # TODO: Also check their hashes? Or just sync them with s3cmd which does MD5 checks... - files=$(s3cmd ls "s3://${S3PATH}/${arch}/" | { grep -oP "(?<=/${S3PATH}/).*\.rpm" || true; }) - # curl supports parallel downloads with the -Z option since v7.68.0, but - # unfortunately Debian carries an older version, hence xargs. 
-  echo "$files" | xargs -r -I{} -n1 -P"$(nproc)" curl -fsSLOR "$repobaseurl/{}"
+
+  # Download existing packages
+  s3cmd sync --exclude='*' --include='*.rpm' "s3://${S3PATH}/${arch}/" ./
 
   # Copy the new packages in and generate signatures
   # FIXME: The architecture naming used by yum docs and in public RPM repos is

From ff02aae4bfdf38b3be5906651929881a43e6fd31 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ivan=20Miri=C4=87?=
Date: Tue, 23 Mar 2021 11:29:43 +0100
Subject: [PATCH 04/24] Add some TODOs

---
 packaging/bin/create-msi-repo.sh | 2 ++
 packaging/bin/entrypoint.sh      | 1 +
 2 files changed, 3 insertions(+)

diff --git a/packaging/bin/create-msi-repo.sh b/packaging/bin/create-msi-repo.sh
index f3d28f3d26c0..7bb38eb9b592 100755
--- a/packaging/bin/create-msi-repo.sh
+++ b/packaging/bin/create-msi-repo.sh
@@ -18,6 +18,8 @@ S3PATH="${3-${_s3bucket}}/msi"
 mkdir -p "$REPODIR"
 
 # Download existing packages
+# For MSI packages this is only done to be able to generate the index.html correctly.
+# Should we fake it and create empty files that have the same timestamp and size as the original ones?
 s3cmd sync --exclude='*' --include='*.msi' "s3://${S3PATH}/" "$REPODIR/"
 
 # Copy the new packages in
diff --git a/packaging/bin/entrypoint.sh b/packaging/bin/entrypoint.sh
index 8eba563844ed..d2f43080128e 100755
--- a/packaging/bin/entrypoint.sh
+++ b/packaging/bin/entrypoint.sh
@@ -15,6 +15,7 @@ fi
 
 gpg2 --import --batch --passphrase="$PGP_SIGN_KEY_PASSPHRASE" "$signkeypath"
 export PGPKEYID="$(gpg2 --list-secret-keys --with-colons | grep '^sec' | cut -d: -f5)"
+# TODO: Publish the pub key to S3
 
 # Setup RPM signing
 cat > "$HOME/.rpmmacros" <<EOF

From [commit hash lost in extraction] Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ivan=20Miri=C4=87?=
Date: Tue, 23 Mar 2021 15:58:10 +0100
Subject: [PATCH 05/24] Add k6 logo to index.html generation script

---
 packaging/bin/generate_index.py | 119 ++++++++++++++++++--------------
 1 file changed, 67 insertions(+), 52 deletions(-)

diff --git a/packaging/bin/generate_index.py b/packaging/bin/generate_index.py
index 085f07abe5f3..3bd6a8f264d8 100755
--- a/packaging/bin/generate_index.py
+++ b/packaging/bin/generate_index.py
@@ -2,14 +2,14 @@
 # ---
 # Copyright 2020 glowinthedark
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); 
-# you may not use this file except in compliance with the License. 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
 #
 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, 
-# software distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #
 # See the License for the specific language governing permissions and limitations under the License.
 # ---
@@ -58,46 +58,48 @@ def process_dir(top_dir, opts):