diff --git a/.github/workflows/all.yml b/.github/workflows/all.yml index dd77f6e474c..d358c677440 100644 --- a/.github/workflows/all.yml +++ b/.github/workflows/all.yml @@ -366,13 +366,15 @@ jobs: done hub release create "${assets[@]}" -m "$VERSION" -m "$(cat ./release\ notes/${VERSION}.md)" "$VERSION" - publish-bintray: + publish-packages: runs-on: ubuntu-latest needs: [configure, build, package-windows] if: startsWith(github.ref, 'refs/tags/v') env: VERSION: ${{ needs.configure.outputs.version }} steps: + - name: Checkout code + uses: actions/checkout@v2 - name: Download binaries uses: actions/download-artifact@v2 with: @@ -383,13 +385,19 @@ jobs: with: name: binaries-windows path: dist - - name: Upload packages to Bintray + - name: Setup docker-compose environment + run: | + cat > packaging/.env < packaging/sign-key.gpg + - name: Publish packages run: | - curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-amd64.deb" \ - "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/deb/k6/${VERSION#v}/k6-${VERSION}-amd64.deb;deb_distribution=stable;deb_component=main;deb_architecture=amd64;publish=1;override=1" - curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-amd64.rpm" \ - "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/rpm/k6/${VERSION#v}/k6-${VERSION}-amd64.rpm?publish=1&override=1" - curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6-$VERSION-win64.msi" \ - "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/windows/k6/${VERSION#v}/k6-${VERSION}-amd64.msi?publish=1&override=1" - curl -fsS -H "X-GPG-PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}" -T "dist/k6.portable.${VERSION#v}.nupkg" \ - "https://${{ secrets.BINTRAY_USER }}:${{ secrets.BINTRAY_KEY }}@api.bintray.com/content/loadimpact/choco/k6.portable/${VERSION#v}/k6.portable.${VERSION}.nupkg?publish=1&override=1" + echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin + cd packaging + docker-compose pull packager + docker-compose run --rm packager diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml new file mode 100644 index 00000000000..ef9fc08e499 --- /dev/null +++ b/.github/workflows/packager.yml @@ -0,0 +1,32 @@ +name: k6packager +on: + # Enable manually triggering this workflow via the API or web UI + workflow_dispatch: + schedule: + - cron: '0 0 * * 0' # weekly (Sundays at 00:00) + +defaults: + run: + shell: bash + +jobs: + publish-packager: + runs-on: ubuntu-latest + env: + VERSION: 0.0.1 + AWSCLI_VERSION: 2.1.36 + DOCKER_IMAGE_ID: k6io/k6packager + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Build + run: | + cd packaging + docker-compose build packager + - name: Publish + run: | + echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin + docker tag "$DOCKER_IMAGE_ID" "ghcr.io/${DOCKER_IMAGE_ID}:${VERSION}" + docker push "ghcr.io/${DOCKER_IMAGE_ID}:${VERSION}" + docker tag "$DOCKER_IMAGE_ID" "ghcr.io/${DOCKER_IMAGE_ID}:latest" + docker push "ghcr.io/${DOCKER_IMAGE_ID}:latest" diff --git a/.gitignore b/.gitignore index 6a7de5cfdd5..0f16c71b00e 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,7 @@ !/vendor/modules.txt /vendor/**/*.y*ml /vendor/**/.*.y*ml -/vendor/github.com/dlclark/regexp2/testoutput1 \ No newline at end of file 
+/vendor/github.com/dlclark/regexp2/testoutput1 + +/packaging/.env +/packaging/*.gpg diff --git a/packaging/Dockerfile b/packaging/Dockerfile new file mode 100644 index 00000000000..57bede01b59 --- /dev/null +++ b/packaging/Dockerfile @@ -0,0 +1,35 @@ +FROM debian:buster-20210311 + +LABEL maintainer="k6 Developers " + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update -y && \ + apt-get install -y apt-utils createrepo curl git gnupg2 python3 unzip + +COPY ./awscli-key.gpg . + +ARG AWSCLI_VERSION + +# Download awscli, check GPG signature and install. +RUN export GNUPGHOME="$(mktemp -d)" && \ + gpg2 --import ./awscli-key.gpg && \ + fpr="$(gpg2 --with-colons --fingerprint aws-cli | grep '^fpr' | cut -d: -f10)" && \ + gpg2 --export-ownertrust && echo "${fpr}:6:" | gpg2 --import-ownertrust && \ + curl -fsSL --remote-name-all \ + "https://awscli.amazonaws.com/awscli-exe-linux-x86_64${AWSCLI_VERSION:+-$AWSCLI_VERSION}.zip"{,.sig} && \ + gpg2 --verify awscli*.sig awscli*.zip && \ + unzip -q awscli*.zip && \ + ./aws/install && \ + rm -rf aws* "$GNUPGHOME" + +RUN addgroup --gid 1000 k6 && \ + useradd --create-home --shell /bin/bash --no-log-init \ + --uid 1000 --gid 1000 k6 + +COPY bin/ /usr/local/bin/ + +USER k6 +WORKDIR /home/k6 + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] diff --git a/packaging/awscli-key.gpg b/packaging/awscli-key.gpg new file mode 100644 index 00000000000..595184845c6 --- /dev/null +++ b/packaging/awscli-key.gpg @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG +ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx +PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G +TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz +gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk +C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG +94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO +lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG +fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG +EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX +XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB +tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7 +Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE +FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM +yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ +MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox +au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do +ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B +hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO +tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H +QYmVr3aIUes20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF +RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB +rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d +H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe +YLZATHZKTJyiqA== +=vYOk +-----END PGP PUBLIC KEY BLOCK----- diff --git a/packaging/bin/create-deb-repo.sh b/packaging/bin/create-deb-repo.sh new file mode 100755 index 00000000000..4e6beeb548b --- /dev/null +++ b/packaging/bin/create-deb-repo.sh @@ -0,0 +1,116 @@ +#!/bin/bash +set -eEuo pipefail + +# External dependencies: +# - https://salsa.debian.org/apt-team/apt (apt-ftparchive, packaged in apt-utils) +# - https://aws.amazon.com/cli/ +# awscli 
expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the +# environment. +# - https://gnupg.org/ +# For signing the script expects the private signing key to already be +# imported and PGPKEYID and PGP_SIGN_KEY_PASSPHRASE to be set in the +# environment. +# - generate_index.py +# For generating the index.html of each directory. It's available in the +# packaging/bin directory of the k6 repo, and should be in $PATH. + +_s3bucket="${S3_BUCKET-dl.k6.io}" +_usage="Usage: $0 [s3bucket=${_s3bucket}]" +PKGDIR="${1?${_usage}}" # The directory where .deb files are located +REPODIR="${2?${_usage}}" # The package repository working directory +S3PATH="${3-${_s3bucket}}/deb" +# Remove packages older than N number of days (730 is roughly ~2 years). +REMOVE_PKG_DAYS=730 + +log() { + echo "$(date -Iseconds) $*" +} + +delete_old_pkgs() { + find "$1" -name '*.deb' -type f -daystart -mtime "+${REMOVE_PKG_DAYS}" -print0 | xargs -r0 rm -v + + # Remove any dangling .asc files + find "$1" -name '*.asc' -type f -print0 | while read -r -d $'\0' f; do + if ! [ -r "${f%.*}" ]; then + rm -v "$f" + fi + done +} + +sync_to_s3() { + log "Syncing to S3 ..." + aws s3 sync --no-progress --delete "${REPODIR}/" "s3://${S3PATH}/" + + # Set a short cache expiration for index and repo metadata files. + aws s3 cp --no-progress --recursive \ + --exclude='*.deb' --exclude='*.asc' --exclude='*.html' \ + --cache-control='max-age=60,must-revalidate' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}" "s3://${S3PATH}" + # Set it separately for HTML files to set the correct Content-Type. + aws s3 cp --no-progress --recursive \ + --exclude='*' --include='*.html' \ + --content-type='text/html' \ + --cache-control='max-age=60,must-revalidate' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}" "s3://${S3PATH}" +} + +# We don't publish i386 packages, but the repo structure is needed for +# compatibility on some systems. See https://unix.stackexchange.com/a/272916 . +architectures="amd64 i386" + +pushd . > /dev/null +mkdir -p "$REPODIR" && cd "$_" + +for arch in $architectures; do + bindir="dists/stable/main/binary-$arch" + mkdir -p "$bindir" + # Download existing files + aws s3 sync --no-progress --exclude='*' --include='*.deb' --include='*.asc' \ + "s3://${S3PATH}/${bindir}/" "$bindir/" + + # Copy the new packages in + find "$PKGDIR" -name "*$arch*.deb" -type f -print0 | xargs -r0 cp -t "$bindir" + # Generate signatures for files that don't have it + # TODO: Switch to debsign instead? This is currently done as Bintray did it, + # but the signature is not validated by apt/dpkg. + # https://blog.packagecloud.io/eng/2014/10/28/howto-gpg-sign-verify-deb-packages-apt-repositories/ + find "$bindir" -type f -name '*.deb' -print0 | while read -r -d $'\0' f; do + if ! [ -r "${f}.asc" ]; then + gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \ + --pinentry-mode=loopback --yes --detach-sign --armor -o "${f}.asc" "$f" + fi + done + apt-ftparchive packages "$bindir" | tee "$bindir/Packages" + gzip -fk "$bindir/Packages" + bzip2 -fk "$bindir/Packages" + + delete_old_pkgs "$bindir" +done + +log "Creating release file..." 
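+# apt-ftparchive writes the Release file that aggregates checksums of the
+# Packages indexes generated above; apt clients verify it via the detached
+# Release.gpg signature (older apt) or the inline-signed InRelease file.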
+apt-ftparchive release \ + -o APT::FTPArchive::Release::Origin="k6" \ + -o APT::FTPArchive::Release::Label="k6" \ + -o APT::FTPArchive::Release::Suite="stable" \ + -o APT::FTPArchive::Release::Codename="stable" \ + -o APT::FTPArchive::Release::Architectures="$architectures" \ + -o APT::FTPArchive::Release::Components="main" \ + -o APT::FTPArchive::Release::Date="$(date -Ru)" \ + "dists/stable" > "dists/stable/Release" + +# Sign release file +gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \ + --pinentry-mode=loopback --yes --detach-sign --armor \ + -o "dists/stable/Release.gpg" "dists/stable/Release" +gpg2 --default-key="$PGPKEYID" --passphrase="$PGP_SIGN_KEY_PASSPHRASE" \ + --pinentry-mode=loopback --yes --clear-sign \ + -o "dists/stable/InRelease" "dists/stable/Release" + +log "Generating index.html ..." +generate_index.py -r + +popd > /dev/null + +sync_to_s3 diff --git a/packaging/bin/create-msi-repo.sh b/packaging/bin/create-msi-repo.sh new file mode 100755 index 00000000000..afd6f1f4c40 --- /dev/null +++ b/packaging/bin/create-msi-repo.sh @@ -0,0 +1,66 @@ +#!/bin/bash +set -eEuo pipefail + +# External dependencies: +# - https://aws.amazon.com/cli/ +# awscli expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the +# environment. +# - generate_index.py +# For generating the index.html of each directory. It's available in the +# packaging/bin directory of the k6 repo, and should be in $PATH. + +_s3bucket="${S3_BUCKET-dl.k6.io}" +_usage="Usage: $0 [s3bucket=${_s3bucket}]" +PKGDIR="${1?${_usage}}" # The directory where .msi files are located +REPODIR="${2?${_usage}}" # The package repository working directory +S3PATH="${3-${_s3bucket}}/msi" +# Remove packages older than N number of days (730 is roughly ~2 years). +REMOVE_PKG_DAYS=730 + +log() { + echo "$(date -Iseconds) $*" +} + +delete_old_pkgs() { + find "$1" -name '*.msi' -type f -daystart -mtime "+${REMOVE_PKG_DAYS}" -print0 | xargs -r0 rm -v +} + +sync_to_s3() { + log "Syncing to S3 ..." + aws s3 sync --no-progress --delete "${REPODIR}/" "s3://${S3PATH}/" + + # Set a short cache expiration for index files and the latest MSI package. + aws s3 cp --no-progress --recursive --exclude='*' \ + --include='*.html' \ + --cache-control='max-age=60,must-revalidate' \ + --content-type='text/html' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}" "s3://${S3PATH}" + aws s3 cp --no-progress \ + --cache-control='max-age=60,must-revalidate' \ + --content-type='application/x-msi' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}/k6-latest-amd64.msi" "s3://${S3PATH}/k6-latest-amd64.msi" +} + +mkdir -p "$REPODIR" + +# Download existing packages +# For MSI packages this is only done to be able to generate the index.html correctly. +# Should we fake it and create empty files that have the same timestamp and size as the original ones? +aws s3 sync --no-progress --exclude='*' --include='*.msi' "s3://${S3PATH}/" "$REPODIR/" + +# Copy the new packages in +find "$PKGDIR" -name "*.msi" -type f -print0 | xargs -r0 cp -t "$REPODIR" + +delete_old_pkgs "$REPODIR" + +# Update the latest package. This could be done with S3 redirects, but +# CloudFront caches redirects aggressively and I wasn't able to invalidate it. +latest="$(find "$REPODIR" -name '*.msi' -printf '%P\n' | sort | tail -1)" +cp -p "${REPODIR}/${latest}" "${REPODIR}/k6-latest-amd64.msi" + +log "Generating index.html ..." 
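+# generate_index.py ships in packaging/bin (see further below); with -r it
+# writes an index.html listing into the repo directory and each subdirectory
+# so the synced bucket can be browsed over HTTP.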
+(cd "$REPODIR" && generate_index.py -r) + +sync_to_s3 diff --git a/packaging/bin/create-rpm-repo.sh b/packaging/bin/create-rpm-repo.sh new file mode 100755 index 00000000000..4d0e1ed4826 --- /dev/null +++ b/packaging/bin/create-rpm-repo.sh @@ -0,0 +1,81 @@ +#!/bin/bash +set -eEuo pipefail + +# External dependencies: +# - https://github.com/rpm-software-management/createrepo +# - https://aws.amazon.com/cli/ +# awscli expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set in the +# environment. +# - https://gnupg.org/ +# For signing the script expects the private signing key to already be +# imported and the `rpm` command configured for signing, e.g. ~/.rpmmacros +# should exist. +# - generate_index.py +# For generating the index.html of each directory. It's available in the +# packaging/bin directory of the k6 repo, and should be in $PATH. + +_s3bucket="${S3_BUCKET-dl.k6.io}" +_usage="Usage: $0 [s3bucket=${_s3bucket}]" +PKGDIR="${1?${_usage}}" # The directory where .rpm files are located +REPODIR="${2?${_usage}}" # The package repository working directory +S3PATH="${3-${_s3bucket}}/rpm" +# Remove packages older than N number of days (730 is roughly ~2 years). +REMOVE_PKG_DAYS=730 + +log() { + echo "$(date -Iseconds) $*" +} + +delete_old_pkgs() { + find "$1" -name '*.rpm' -type f -daystart -mtime "+${REMOVE_PKG_DAYS}" -print0 | xargs -r0 rm -v +} + +sync_to_s3() { + log "Syncing to S3 ..." + aws s3 sync --no-progress --delete "${REPODIR}/" "s3://${S3PATH}/" + + # Set a short cache expiration for index and repo metadata files. + aws s3 cp --no-progress --recursive --exclude='*.rpm' \ + --cache-control='max-age=60,must-revalidate' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}" "s3://${S3PATH}" + # Set it separately for HTML files to set the correct Content-Type. + aws s3 cp --no-progress --recursive \ + --exclude='*' --include='*.html' \ + --content-type='text/html' \ + --cache-control='max-age=60,must-revalidate' \ + --metadata-directive=REPLACE \ + "s3://${S3PATH}" "s3://${S3PATH}" +} + +architectures="x86_64" + +pushd . > /dev/null +mkdir -p "$REPODIR" && cd "$_" + +for arch in $architectures; do + mkdir -p "$arch" && cd "$_" + + # Download existing packages + aws s3 sync --no-progress --exclude='*' --include='*.rpm' "s3://${S3PATH}/${arch}/" ./ + + # Copy the new packages in and generate signatures + # FIXME: The architecture naming used by yum docs and in public RPM repos is + # "x86_64", whereas our packages are named with "amd64". So we do a replacement + # here, but we should probably consider naming them with "x86_64" instead. + find "$PKGDIR" -name "*${arch/x86_64/amd64}*.rpm" -type f -print0 | while read -r -d $'\0' f; do + cp -av "$f" "$PWD/" + rpm --addsign "${f##*/}" + done + createrepo . + cd - + + delete_old_pkgs "$arch" +done + +log "Generating index.html ..." +generate_index.py -r + +popd > /dev/null + +sync_to_s3 diff --git a/packaging/bin/entrypoint.sh b/packaging/bin/entrypoint.sh new file mode 100755 index 00000000000..893970a00d8 --- /dev/null +++ b/packaging/bin/entrypoint.sh @@ -0,0 +1,55 @@ +#!/bin/bash +set -eEuo pipefail + +log() { + echo "$(date -Iseconds) $*" +} + +signkeypath="$PWD/sign-key.gpg" +s3bucket="${S3_BUCKET-dl.k6.io}" +pkgdir="$PWD/Packages" + +if ! 
[ -r "$signkeypath" ]; then + log "ERROR: Signing key not found at '$signkeypath'" + exit 1 +fi + +gpg2 --import --batch --passphrase="$PGP_SIGN_KEY_PASSPHRASE" "$signkeypath" +export PGPKEYID="$(gpg2 --list-secret-keys --with-colons | grep '^sec' | cut -d: -f5)" +mkdir -p "$pkgdir" +gpg2 --export --armor --output "${pkgdir}/key.gpg" "$PGPKEYID" + +# Setup RPM signing +cat > "$HOME/.rpmmacros" < + + + + k6 Packages + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +

""" + f'{path_top_dir.name}' + """

+
+
+
+ + + + + + + + + + + + + + + + + + +""") + + # sort dirs first + sorted_entries = sorted(path_top_dir.glob(glob_patt), key= lambda p: (p.is_file(), p.name)) + + entry: Path + for entry in sorted_entries: + + # don't include index.html in the file listing + if entry.name.lower() == index_file_name.lower(): + continue + + if entry.is_dir() and opts.recursive: + process_dir(entry, opts) + + # From Python 3.6, os.access() accepts path-like objects + if (not entry.is_symlink()) and not os.access(str(entry), os.W_OK): + print(f"*** WARNING *** entry {entry.absolute()} is not writable! SKIPPING!") + continue + if opts.verbose: + print(f'{entry.absolute()}') + + size_bytes = -1 ## is a folder + size_pretty = '—' + last_modified = '-' + last_modified_human_readable = '-' + last_modified_iso = '' + try: + if entry.is_file(): + size_bytes = entry.stat().st_size + size_pretty = pretty_size(size_bytes) + + if entry.is_dir() or entry.is_file(): + last_modified = datetime.datetime.fromtimestamp(entry.stat().st_mtime).replace(microsecond=0) + last_modified_iso = last_modified.isoformat() + last_modified_human_readable = last_modified.strftime("%c") + + except Exception as e: + print('ERROR accessing file name:', e, entry) + continue + + entry_path = str(entry.name) + + if entry.is_dir() and not entry.is_symlink(): + entry_type = 'folder' + entry_path = os.path.join(entry.name, '') + + elif entry.is_dir() and entry.is_symlink(): + entry_type = 'folder-shortcut' + print('dir-symlink', entry.absolute()) + + elif entry.is_file() and entry.is_symlink(): + entry_type = 'file-shortcut' + print('file-symlink', entry.absolute()) + + else: + entry_type = 'file' + + index_file.write(f""" + + + + + + + +""") + + index_file.write(""" + +
NameSize + Modified +
+..
+ + + {entry.name} + + {size_pretty}
+
+
+ +""") + if index_file: + index_file.close() + + +# bytes pretty-printing +UNITS_MAPPING = [ + (1024 ** 5, ' PB'), + (1024 ** 4, ' TB'), + (1024 ** 3, ' GB'), + (1024 ** 2, ' MB'), + (1024 ** 1, ' KB'), + (1024 ** 0, (' byte', ' bytes')), +] + + +def pretty_size(bytes, units=UNITS_MAPPING): + """Human-readable file sizes. + + ripped from https://pypi.python.org/pypi/hurry.filesize/ + """ + for factor, suffix in units: + if bytes >= factor: + break + amount = int(bytes / factor) + + if isinstance(suffix, tuple): + singular, multiple = suffix + if amount == 1: + suffix = singular + else: + suffix = multiple + return str(amount) + suffix + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='''DESCRIPTION: + Generate directory index files (recursive is OFF by default). + Start from current dir or from folder passed as first positional argument. + Optionally filter by file types with --filter "*.py". ''') + + parser.add_argument('top_dir', + nargs='?', + action='store', + help='top folder from which to start generating indexes, ' + 'use current folder if not specified', + default=os.getcwd()) + + parser.add_argument('--filter', '-f', + help='only include files matching glob', + required=False) + + parser.add_argument('--recursive', '-r', + action='store_true', + help="recursively process nested dirs (FALSE by default)", + required=False) + + parser.add_argument('--verbose', '-v', + action='store_true', + help='***WARNING: this can take a very long time with complex file tree structures***' + ' verbosely list every processed file', + required=False) + + config = parser.parse_args(sys.argv[1:]) + process_dir(config.top_dir, config) diff --git a/packaging/docker-compose.yml b/packaging/docker-compose.yml new file mode 100644 index 00000000000..704a7906325 --- /dev/null +++ b/packaging/docker-compose.yml @@ -0,0 +1,19 @@ +version: '3.4' + +services: + packager: + build: + context: . + args: + - AWSCLI_VERSION=${AWSCLI_VERSION:-2.1.36} + image: ghcr.io/k6io/k6packager:latest + environment: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_DEFAULT_REGION + - AWS_CF_DISTRIBUTION + - PGP_SIGN_KEY_PASSPHRASE + - S3_BUCKET=dl.k6.io + volumes: + - ../dist:/home/k6/dist + - ./sign-key.gpg:/home/k6/sign-key.gpg
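+
+# The publish-packages job in .github/workflows/all.yml writes packaging/.env
+# and packaging/sign-key.gpg before invoking `docker-compose run --rm packager`;
+# both files are listed in .gitignore so they never get committed.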