fix: [N, 1] pattern should take N to create 1d causal mask #135
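As context for the title, here is a minimal sketch of the intended behaviour in plain PyTorch (not the actual xformers API): a pattern shaped [N, 1] only contributes its length N, from which a 1-D lower-triangular causal mask is built. The helper name causal_mask_from_pattern is hypothetical.

    import torch

    def causal_mask_from_pattern(pattern: torch.Tensor) -> torch.Tensor:
        # Hypothetical illustration: pattern is assumed to be shaped [N, 1];
        # only N (the sequence length) is used.
        n = pattern.shape[0]
        # Lower-triangular boolean mask: query i may attend to keys j <= i.
        return torch.tril(torch.ones(n, n, dtype=torch.bool))

    # Usage: a [4, 1] pattern yields a 4x4 causal mask.
    mask = causal_mask_from_pattern(torch.zeros(4, 1))
    assert mask.shape == (4, 4)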
Workflow file for this run
name: gpu_test_gh
on:
  workflow_dispatch: {}
  pull_request:
    paths:
      - "xformers/**"
      - "!xformers/benchmarks/**"
      - "!xformers/version.txt"
      - ".github/workflows/gpu_test_gh*"
      - "tests/**"
      - "setup.py"
      - "requirements*.txt"
      - "third_party/**"
  push:
    branches:
      - main
env:
  XFORMERS_BUILD_TYPE: "Release"
  CI: "1"
  TORCHINDUCTOR_COMPILE_THREADS: "1"
jobs:
  gpu_test_gh:
    strategy:
      fail-fast: false
      matrix:
        gpu:
          - runner: "h100"
            sm: "9.0a"
          - runner: "4-core-ubuntu-gpu-t4"
            sm: "7.5"
        pytorch:
          - channel: pytorch-nightly
            cuda: 12.1
            version: 2
        python: [3.11]
    name: test_sm${{ matrix.gpu.sm }}_cu${{ matrix.pytorch.cuda }}
    runs-on: ${{ matrix.gpu.runner }}
    timeout-minutes: 360
    defaults:
      run:
        shell: bash -l {0}
    steps:
      - name: Recursive checkout
        uses: actions/checkout@v3
        with:
          submodules: recursive
          path: "."
      - run: nvidia-smi
      - name: Install micromamba
        run: |
          set -ex
          curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba
          echo "eval \"\$($(pwd)/bin/micromamba shell hook --shell bash)\"" >> ~/.profile
          cat ~/.profile
      - name: Create environment
        run: |
          set -ex
          micromamba config set channel_priority strict
          micromamba create -n env python=${{ matrix.python }} \
            zlib pip ninja pytorch=${{ matrix.pytorch.version }} ccache=4.8 pytorch-mutex==1.0=cuda pytorch-cuda=${{ matrix.pytorch.cuda }} \
            cuda-libraries-dev cuda-nvcc \
            -c nvidia -c ${{ matrix.pytorch.channel }} -c conda-forge -q -y
      - name: Activate environment
        shell: bash -l {0}
        run: |
          echo "micromamba activate env" >> ~/.profile
          echo "==== .profile ====="
          cat ~/.profile
      - name: Setup test requirements
        run: |
          which python
          pip install -r requirements-test.txt --progress-bar off
      - run: TORCH_CUDA_ARCH_LIST=${{ matrix.gpu.sm }} python setup.py develop
      - run: python -m xformers.info
      - name: xFormers import should not init cuda context
        run: |
          # NOTE: we check the GPU version by default to determine if triton should be used,
          # and this initializes the CUDA context unless we set `XFORMERS_ENABLE_TRITON`
          XFORMERS_ENABLE_TRITON=1 python -c "import xformers; import xformers.ops; import torch; assert not torch.cuda.is_initialized()"
      - name: Unit tests
        run: |
          python -m pytest --verbose --random-order-bucket=global --maxfail=20 --junitxml=test-results/junit.xml --cov-report=xml --cov=./ tests
      - name: Publish Test Report
        uses: mikepenz/action-junit-report@v3
        if: success() || failure()  # always run, even if a previous step fails
        with:
          report_paths: 'test-results/*.xml'