diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..9e02085f3 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +## Type of Change + +feature or bug fix or documentation or others +API changed or not + +## Description + +detailed description +Issues: xxx + +## Expected Behavior & Potential Risk + +the expected behavior triggered by this PR + +## How has this PR been tested? + +how to reproduce the test (including hardware information) + +## Dependency Change? + +any library dependency introduced or removed diff --git a/.github/workflows/copyright_check.yml b/.github/workflows/copyright_check.yml new file mode 100644 index 000000000..0701faa35 --- /dev/null +++ b/.github/workflows/copyright_check.yml @@ -0,0 +1,80 @@ +name: Copyright Check + +on: + pull_request: + branches: [main] + paths: + - intel_extension_for_transformers/** + - setup.py + - .github/workflows/copyright_check.yml + workflow_dispatch: + +# If there is a new commit, the previous jobs will be canceled +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + CODE_SCAN_LOG_PATH: "${{ github.workspace }}/log" + CONTAINER_NAME: "codeScan" + +jobs: + format-scan: + runs-on: itrex-node-spell + strategy: + matrix: + job_name: ["copyright"] + fail-fast: false + steps: + - name: Docker Clean Up + run: | + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}-${{ runner.name }}'$) ]]; then + docker start ${{ env.CONTAINER_NAME }}-${{ runner.name }} + echo "remove left files through container ..." 
+ docker exec ${{ env.CONTAINER_NAME }}-${{ runner.name }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + + - name: Checkout out Repo + uses: actions/checkout@v3 + + - name: CopyRight check + run: | + source ${{ github.workspace }}/.github/workflows/script/change_color.sh + set -e + mkdir -p ${{ env.CODE_SCAN_LOG_PATH }} + supported_extensions=(py, sh, yaml) + git fetch + git --no-pager diff --name-only remotes/origin/${{ github.base_ref }} ${{ github.workspace }}/intel_extension_for_transformers> ${{ env.CODE_SCAN_LOG_PATH }}/diff.log + files=$(cat ${{ env.CODE_SCAN_LOG_PATH }}/diff.log | awk '!a[$0]++') + $LIGHT_PURPLE && echo " ----------------- checking ... --------------------------" && $RESET + if [[ -f ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log ]]; then + rm -f ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log + fi + for file in ${files} + do + if [[ "${supported_extensions[@]}" =~ "${file##*.}" ]]; then + if [ $(grep -E -c "Copyright \\(c\\) ([0-9]{4})(-[0-9]{4})? Intel Corporation" ${file}) = 0 ]; then + echo ${file} >> ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log + $BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" + cat ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log + $BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET + $BOLD_RED && echo "CopyRight has something wrong! Please click on the artifact button to download and view the error log!" 
&& $RESET + fi + else + $LIGHT_PURPLE && echo "Skipping ${file}" && $RESET + fi + done + if [[ -f ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log ]]; then + $BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" + cat ${{ env.CODE_SCAN_LOG_PATH }}/copyright_issue_summary.log + $BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET + $BOLD_RED && echo "CopyRight has something wrong! Please click on the artifact button to download and view the error log!" && $RESET && exit 1 + fi + + - name: Publish pipeline artifact + if: ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.job_name }} + path: ${{ env.CODE_SCAN_LOG_PATH }}.* diff --git a/.github/workflows/cpp-graph-test.yml b/.github/workflows/cpp-graph-test.yml new file mode 100644 index 000000000..0d2ad97b1 --- /dev/null +++ b/.github/workflows/cpp-graph-test.yml @@ -0,0 +1,158 @@ +name: CPP Graph Test + +on: + pull_request: + branches: [main] + paths: + - '.github/workflows/cpp-graph-test.yml' + - '.github/workflows/script/models/cpp_graph_inference.sh' + - 'intel_extension_for_transformers/llm/runtime/graph/**' + - 'intel_extension_for_transformers/llm/library/jblas/**' + - '!intel_extension_for_transformers/llm/runtime/graph/README.md' + workflow_dispatch: + inputs: + compiler_version: + description: 'compiler_version' + required: false + type: string + default: '13.1.0' + models: + description: 'models (in json)' + required: false + type: string + default: '["llama-2-7b-chat", "gptj-6b"]' + runner: + description: 'runner' + required: false + type: string + default: 'spr' + +# If there is a new commit, the previous jobs will be canceled +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + OUT_SCRIPT_PATH: ${{ github.workspace }}/.github/workflows/script/models + SCRIPT_PATH: ${{ github.workspace 
 }}/.github/workflows/script + WORKING_DIR: ${{ github.workspace }} + CONTAINER_NAME: "codeScan" + INPUT_COMPILER_VERSION: ${{ inputs.compiler_version || '13.1.0' }} + +jobs: + CPP-Graph-Workflow: + runs-on: ${{inputs.runner || 'spr'}} + strategy: + matrix: + modelName: ${{fromJson(inputs.models || '["llama-2-7b-chat", "gptj-6b"]')}} + steps: + - name: Checkout out Repo + uses: actions/checkout@v3 + with: + submodules: "recursive" + fetch-tags: true + + - name: Env build + run: | + bash ${{ github.workspace }}/.github/workflows/script/prepare_env_with_conda.sh "cpp-graph-test" "3.8" + + - name: Binary build + # cpp model does not require the itrex package; step is intentionally disabled via the always-false condition below + if: 0 == 1 + run: | + cd ${{ github.workspace }} + conda activate cpp-graph-test || source activate cpp-graph-test + pip install build --upgrade + pip install -r requirements.txt + python setup.py sdist bdist_wheel + pip install dist/intel_extension_for_transformers*.whl + pip list + + - name: BF16 Benchmark + run: | + cd ${{ github.workspace }}/.github/workflows/script/models + bash cpp_graph_inference.sh cpp-graph-test ${{ matrix.modelName }} ${{ env.INPUT_COMPILER_VERSION }} + + - name: Rename summary + run: | + cd ${{ github.workspace }} + cp cpp_graph_summary.log cpp_graph_summary_${{matrix.modelName}}.log + + - name: Publish pipeline artifact + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: cpp_graph + path: ${{ github.workspace }}/cpp_graph_summary_${{matrix.modelName}}.log + if-no-files-found: ignore # 'warn' or 'ignore' are also available, defaults to `warn` + retention-days: 60 # 1 <= retention-days <= 90 + + Genreate-Report: + runs-on: itrex-node-spell + needs: [CPP-Graph-Workflow] + steps: + - name: Docker Clean Up + run: | + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}-${{ runner.name }}'$) ]]; then + docker start ${{ env.CONTAINER_NAME }}-${{ runner.name }} + echo "remove left files through container ..." 
+ docker exec ${{ env.CONTAINER_NAME }}-${{ runner.name }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + - name: Checkout out Repo + uses: actions/checkout@v3 + + - name: Download Summary Log + uses: actions/download-artifact@v3 + with: + path: ${{ env.OUT_SCRIPT_PATH }}/generated/log + + - name: Merge CPP Graph Summary Log + run: | + cd ${{ env.OUT_SCRIPT_PATH }}/generated/log/cpp_graph + for summary in $(find . -name "cpp_graph_summary_*.log"); do cat $summary >> cpp_graph_summary.log; done + + - name: Download Reference Artifact + id: download-artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: cpp-graph-test.yml + name: FinalReport + run_id: ${{ vars.GRAPH_REF_ID }} + path: ${{ env.OUT_SCRIPT_PATH }} + name_is_regexp: true + repo: ${{ github.repository }} + check_artifacts: false + search_artifacts: false + skip_unpack: false + if_no_artifact_found: warn + + - name: Display structure of downloaded files + run: cd ${{ env.OUT_SCRIPT_PATH }} && ls -R + + - name: Generate report + run: | + echo "------ Generating final report.html ------" + cd ${{ env.OUT_SCRIPT_PATH }} + /usr/bin/bash generate_report.sh --workflow=deploy + sed -n '//,/<\/body>/p' generated/report.html | sed -r '/^$/d' | sed -r 's/^ +//g' >> $GITHUB_STEP_SUMMARY + env: + RUN_DISPLAY_URL: https://github.com/VincyZhang/intel-extension-for-transformers/actions/runs/${{ github.run_id }} + BUILD_NUMBER: ${{ github.run_id }} + JOB_STATUS: succeed + MR_source_branch: ${{ github.head_ref }} + ghprbActualCommit: ${{ github.event.pull_request.head.sha }} + + - name: Publish Report + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: FinalReport + path: ${{ env.OUT_SCRIPT_PATH }}/generated + + - name: Specify performance regression + run: | + if [ $(is_perf_reg) == 'true' ]; then + echo "[Performance Regression] Some model performance regression 
occurred, please check artifacts and reports." + exit 1 + fi diff --git a/.github/workflows/format_scan.yml b/.github/workflows/format_scan.yml new file mode 100644 index 000000000..02b275c34 --- /dev/null +++ b/.github/workflows/format_scan.yml @@ -0,0 +1,84 @@ +name: Format Scan + +on: + pull_request: + branches: [main] + paths: + - intel_extension_for_transformers/** + - neural_chat/** + - workflows/** + - setup.py + - .github/workflows/format_scan.yml + - .github/workflows/script/formatScan/** + workflow_dispatch: + +# If there is a new commit, the previous jobs will be canceled +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + DOCKER_CONFIG_NAME: "commonDockerConfig" + REPO_NAME: "code-scan" + REPO_TAG: "1.0" + DOCKER_FILE_NAME: "codeScan" + CONTAINER_NAME: "codeScan" + +jobs: + format-scan: + runs-on: itrex-node-spell + strategy: + matrix: + job_name: [ + "pylint", + "bandit", + "clangformat", + "cloc", + "cpplint", + # "pydocstyle", + #"pyspelling", + ] + fail-fast: false + steps: + - name: Docker Clean Up + run: | + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}-${{ runner.name }}'$) ]]; then + docker start ${{ env.CONTAINER_NAME }}-${{ runner.name }} + echo "remove left files through container ..." + docker exec ${{ env.CONTAINER_NAME }}-${{ runner.name }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + + - name: Checkout out Repo + uses: actions/checkout@v3 + + - name: Docker Build + run: | + docker build -f ${{ github.workspace }}/.github/workflows/docker/${{ env.DOCKER_FILE_NAME }}.dockerfile -t ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} . 
+ + - name: Docker Run + run: | + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}-${{ runner.name }}'$) ]]; then + docker stop ${{ env.CONTAINER_NAME }}-${{ runner.name }} + docker rm -vf ${{ env.CONTAINER_NAME }}-${{ runner.name }} || true + fi + docker run -dit --memory="4g" --memory-reservation="1g" --disable-content-trust --privileged --name=${{ env.CONTAINER_NAME }}-${{ runner.name }} --shm-size="1g" \ + -v ${{ github.workspace }}:/intel-extension-for-transformers \ + ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} + + - name: Env build + run: | + docker exec ${{ env.CONTAINER_NAME }}-${{ runner.name }} \ + bash /intel-extension-for-transformers/.github/workflows/script/prepare_env.sh + + - name: Code scan check + run: | + docker exec ${{ env.CONTAINER_NAME }}-${{ runner.name }} \ + bash -c "bash /intel-extension-for-transformers/.github/workflows/script/formatScan/${{ matrix.job_name }}.sh" + + - name: Publish pipeline artifact + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.job_name }} + path: ${{ github.workspace }}/.github/workflows/script/formatScan/${{ matrix.job_name }}.* diff --git a/.github/workflows/llm-test.yml b/.github/workflows/llm-test.yml new file mode 100644 index 000000000..c497ed263 --- /dev/null +++ b/.github/workflows/llm-test.yml @@ -0,0 +1,148 @@ +name: LLM Model Test + +on: + pull_request: + branches: [main] + paths: + - '.github/workflows/llm-test.yml' + - '.github/workflows/script/models/run_llm.sh' + - "intel_extension_for_transformers/llm/runtime/deprecated/**" + - "!intel_extension_for_transformers/llm/runtime/deprecated/kernels/**" + - "!intel_extension_for_transformers/llm/runtime/graph/**" + - "!intel_extension_for_transformers/llm/runtime/deprecated/test/**" + - "!intel_extension_for_transformers/llm/runtime/deprecated/third_party/**" + - "!intel_extension_for_transformers/llm/runtime/deprecated/docs/**" + workflow_dispatch: + +# If there is a new commit, the previous jobs will be 
canceled +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + OUT_SCRIPT_PATH: ${{ github.workspace }}/.github/workflows/script/models + SCRIPT_PATH: ${{ github.workspace }}/.github/workflows/script + WORKING_DIR: ${{ github.workspace }} + EXTRA_CONTAINER_NAME: "codeScan" + + +jobs: + LLM-Workflow: + runs-on: spr + strategy: + matrix: + include: + - modelName: "gpt-j-6b" + framework: "engine" + mode: "latency" + precision: "bf16,int8,fp8" + steps: + - name: Checkout out Repo + uses: actions/checkout@v3 + with: + submodules: "recursive" + fetch-tags: true + + - name: Env build + run: | + bash ${{ github.workspace }}/.github/workflows/script/prepare_env_with_conda.sh "llm-test" "3.8" + + - name: Binary build + run: | + cd ${{ github.workspace }} + conda activate llm-test || source activate llm-test + pip install build --upgrade + pip install -r requirements.txt + python setup.py sdist bdist_wheel + pip install dist/intel_extension_for_transformers*.whl + pip list + + - name: BF16 Benchmark + run: | + cd ${{ github.workspace }}/.github/workflows/script/models + bash run_llm.sh --model=${{ matrix.modelName }} --framework=${{ matrix.framework }} --mode=${{ matrix.mode }} --conda_env_name=llm-test --precision=bf16 + + - name: INT8 Benchmark + run: | + cd ${{ github.workspace }}/.github/workflows/script/models + bash run_llm.sh --model=${{ matrix.modelName }} --framework=${{ matrix.framework }} --mode=${{ matrix.mode }} --conda_env_name=llm-test --precision=int8 + + - name: FP8 Benchmark + run: | + cd ${{ github.workspace }}/.github/workflows/script/models + bash run_llm.sh --model=${{ matrix.modelName }} --framework=${{ matrix.framework }} --mode=${{ matrix.mode }} --conda_env_name=llm-test --precision=fp8 + + + - name: Publish pipeline artifact + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: llm + path: ${{ github.workspace }}/*.log + if-no-files-found: 
ignore # 'warn' or 'ignore' are also available, defaults to `warn` + retention-days: 60 # 1 <= retention-days <= 90 + + Generate-Report: + runs-on: itrex-node-spell + needs: [LLM-Workflow] + steps: + - name: Docker Clean Up + run: | + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.EXTRA_CONTAINER_NAME }}-${{ runner.name }}'$) ]]; then + docker start ${{ env.EXTRA_CONTAINER_NAME }}-${{ runner.name }} + echo "remove left files through container ..." + docker exec ${{ env.EXTRA_CONTAINER_NAME }}-${{ runner.name }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Download Summary Log + uses: actions/download-artifact@v3 + with: + path: ${{ env.OUT_SCRIPT_PATH }}/generated/log + + - name: Download Reference Artifact + id: download-artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: llm-test.yml + name: FinalReport + run_id: ${{ vars.LLM_REF_ID }} + path: ${{ env.OUT_SCRIPT_PATH }} + name_is_regexp: true + repo: ${{ github.repository }} + check_artifacts: false + search_artifacts: false + skip_unpack: false + if_no_artifact_found: warn + + - name: Display structure of downloaded files + run: cd ${{ env.OUT_SCRIPT_PATH }} && ls -R + + - name: Generate report + run: | + echo "------ Generating final report.html ------" + cd ${{ env.OUT_SCRIPT_PATH }} + /usr/bin/bash generate_report.sh --workflow=deploy + env: + RUN_DISPLAY_URL: https://github.com/VincyZhang/intel-extension-for-transformers/actions/runs/${{ github.run_id }} + BUILD_NUMBER: ${{ github.run_id }} + JOB_STATUS: succeed + MR_source_branch: ${{ github.head_ref }} + ghprbActualCommit: ${{ github.event.pull_request.head.sha }} + + - name: Publish Report + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: FinalReport + path: ${{ env.OUT_SCRIPT_PATH }}/generated + + - name: Specify 
performance regression + run: | + if [ $(is_perf_reg) == 'true' ]; then + echo "[Performance Regression] Some model performance regression occurred, please check artifacts and reports." + exit 1 + fi + \ No newline at end of file diff --git a/.github/workflows/scripts/formatScan/bandit.sh b/.github/workflows/scripts/formatScan/bandit.sh new file mode 100644 index 000000000..070c0574c --- /dev/null +++ b/.github/workflows/scripts/formatScan/bandit.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh +pip install bandit==1.7.4 +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +python -m bandit -r -lll -iii /intel-extension-for-transformers >${log_dir}/bandit.log +exit_code=$? + +$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" +cat ${log_dir}/bandit.log +$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET + +if [ ${exit_code} -ne 0 ]; then + $BOLD_RED && echo "Error!! Please Click on the artifact button to download and view Bandit error details." && $RESET + exit 1 +fi + +$BOLD_PURPLE && echo "Congratulations, Bandit check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." 
&& $RESET +exit 0 diff --git a/.github/workflows/scripts/formatScan/clangformat.sh b/.github/workflows/scripts/formatScan/clangformat.sh new file mode 100644 index 000000000..3f0141bd5 --- /dev/null +++ b/.github/workflows/scripts/formatScan/clangformat.sh @@ -0,0 +1,21 @@ +#!/bin/bash +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh + +pip install clang-format==14.0.0 +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +log_path=${log_dir}/clangformat.log + +cd /intel-extension-for-transformers +git config --global --add safe.directory "*" + +cd /intel-extension-for-transformers/intel_extension_for_transformers/llm/runtime/graph +python scripts/clang-format.py + +echo "run git diff" +git diff 2>&1 | tee -a ${log_path} + +if [[ ! -f ${log_path} ]] || [[ $(grep -c "diff" ${log_path}) != 0 ]]; then + exit 1 +fi +$BOLD_PURPLE && echo "Congratulations, check passed!" && $LIGHT_PURPLE && echo "You can click on the artifact button to see the log details." 
&& $RESET +exit 0 diff --git a/.github/workflows/scripts/formatScan/cloc.sh b/.github/workflows/scripts/formatScan/cloc.sh new file mode 100644 index 000000000..771cd6f38 --- /dev/null +++ b/.github/workflows/scripts/formatScan/cloc.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +cloc --include-lang=Python --csv --out=${log_dir}/cloc.csv /intel-extension-for-transformers diff --git a/.github/workflows/scripts/formatScan/cpplint.sh b/.github/workflows/scripts/formatScan/cpplint.sh new file mode 100644 index 000000000..95f9b9224 --- /dev/null +++ b/.github/workflows/scripts/formatScan/cpplint.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh + +pip install cpplint +REPO_DIR=/intel-extension-for-transformers +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +log_path=${log_dir}/cpplint.log +cpplint --filter=-build/include_subdir,-build/header_guard --recursive --quiet --linelength=120 ${REPO_DIR}/intel_extension_for_transformers/llm/runtime/deprecated/compile 2>&1 | tee ${log_path} +cpplint --filter=-build/include_subdir,-build/header_guard --recursive --quiet --linelength=120 ${REPO_DIR}/intel_extension_for_transformers/llm/runtime/deprecated/executor 2>&1 | tee -a ${log_path} +cpplint --filter=-build/include_subdir,-build/header_guard --recursive --quiet --linelength=120 ${REPO_DIR}/intel_extension_for_transformers/llm/runtime/deprecated/test 2>&1 | tee -a ${log_path} +if [[ ! -f ${log_path} ]] || [[ $(grep -c "Total errors found:" ${log_path}) != 0 ]]; then + exit 1 +fi +$BOLD_PURPLE && echo "Congratulations, check passed!" && $LIGHT_PURPLE && echo "You can click on the artifact button to see the log details." 
&& $RESET +exit 0 diff --git a/.github/workflows/scripts/formatScan/nlp_dict.txt b/.github/workflows/scripts/formatScan/nlp_dict.txt new file mode 100644 index 000000000..52dcde010 --- /dev/null +++ b/.github/workflows/scripts/formatScan/nlp_dict.txt @@ -0,0 +1,2412 @@ +aa +aac +Abc +AbcAdaptor +AbcTuneStrategy +abi +ABI +absl +abspath +abstractive +acc +Acc +accuracies +acdc +ACDC +Acknowledgement +activations +Adadelta +adam +AdamW +adaptor +Adaptor +AddEmbeddings +AddN +addr +ADDR +AddV +ade +ADE +adresses +AdvProp +ae +aea +af +AGS +ai +ailab +al +albert +alexnet +AlexNet +algo +algos +alibaba +Alibaba +AlignImageChannel +allenai +alloc +ALLREDUCE +alsologtostderr +Aman +amazonaws +amazonlinux +Amodei +AmpConf +AMX +analytics +Analytics +Anastasiia +AnchorGenerator +andravin +andreamad +anisotropic +anno +anton +ap +apache +api +APIs +APl +APm +approch +APs +arg +argmax +ArgMax +args +arxiv +arXiv +asd +astype +asym +async +atrous +att +AttentionReshape +attr +attredirects +AttributeProto +attrs +auc +aug +autgrad +autogenerate +autograd +AutoMixPrecision +autopep +Autoregressive +ava +AverageMeter +avgloss +AvgPool +avx +AVX +AWQ +backend +backends +backticks +bart +barthez +bashrc +basicConfig +BasicTokenizer +batchgenerators +BatchMatMul +BatchMatMulV +batchnorm +BatchNorm +bayesian +BayesianOptimization +bazel +bbbb +bbox +bboxes +bccf +bce +bd +bdb +bdist +benchmarked +benchmarking +Benchmarking +Bengio +Benoît +berkeleyvision +bert +BERT's +BertAdam +BertConfig +BERTDataSet +BertForMaskedLM +BertForNextSentencePrediction +BertForPreTraining +BertForQuestionAnswering +BertForSequenceClassification +BertForTokenClassification +BertModel +berts +bertsquad +BertTokenizer +bfloat +BFP +BGR +Bianchi +BiasAdd +BibTeX +bicubic +bilibili +BiliBili +bilinear +BilinearImagenet +billsum +BiLSTM +binarize +binarized +BinaryAdd +biomedical +Biomedical +BLAS +blendcnn +BlendCnn +BlendCNN +bleu +BLEU +blocktime +blogpost +bn +bninception +BNInception +bobw +booktitle 
+bool +BoW +boxlist +BoxList +br +BrainTumour +BraTS +broadcasted +bs +bsnone +bugfix +buildin +builtin +Builtin +BV +bvlcalexnet +bzip +cadene +Cadene +caffe +Caffe +caffenet +Caffenet +cafferesnet +CaffeResnet +CaffeResNet +Caiming +calib +calibrationcsv +camembert +CamemBERT +canada +Carbonell +CascadeFullRes +cbica +cd +cdn +ce +cec +CenterCrop +centernet +centerNet +centos +CentOS +Centre +cern +CERN's +certfile +Cesa +cfa +cffi +cfg +CFLAGS +ChamNet +Chaumond +checkbox +checkboxes +Cheng +chmod +Cho's +chongruo +Chongruo +chris +Chuanqi +ci +cifar +CIFAR +circleci +Cistac +cityscapes +Cityscapes +cityscapesscripts +cityscapesScripts +cknowledge +ckpt +ckpts +ClassPredictionTower +clcarwin +Clergerie +cli +CLI +clipnorm +clm +CLM +cls +CLX +cly +cmake +CMake +CMAKE +CMakeLists +cmd +CMU +cn +cnn +cnt +CoCo +cocoapi +cocoApi +cocodataset +COCODataset +COCODemo +COCOEval +COCOmAP +COCOmAPv +COCONpy +cocoraw +COCORaw +COCORecord +codalab +codecogs +codenamed +CoLA +colorama +ColorJitter +colspan +compat +compilervars +concat +ConcatV +cond +conda +CONDA +condconv +CondConv +Condensenet +conf +config +Config +configs +CoNLL +Conneau +const +ConstantOfShape +ConstDataLoader +constexpr +contaning +conv +Conv +ConvBNReLU +ConvertingSSDMobilenetToONNX +convertion +ConvNets +convolutional +Convolutional +ConvPerStage +ConvReLU +copt +coreml +CoreML +cp +cpp +cpu +cpus +CPUs +CPX +cpython +creafz +creatis +creativecommons +criteo +Criteo +CriteoTerabyte +croping +CropResize +CropToBoundingBox +CrossEntropyLoss +crossvalidaton +crt +csv +ctrl +CTRLModel +CTRLTokenizer +ctuning +ctx +cuda +cudaPopCallConfiguration +cudatoolkit +CUDAToolKit +cudnn +CUHK +curr +Curran +custormer +cv +CvAClvFfyA +CXX +cxxopt +cypw +cython +da +dae +DagnyT +Dai +dailymail +Danqi +darknet +Darknet +datadir +datafile +dataloader +dataLoader +DataLoader +DataLoadermodule +dataloaders +DataParallel +datapoints +DataProcessor +dataset +Dataset +DatasetAnalyzer +datasets +datatype +datatypes 
+dathath +Dathathri +datset +dbmdz +DBMDZ +dbox +dbs +dcbadge +DCMAKE +dcn +ddp +DDP +de +deberta +DecodeImage +deepengine +deeplab +DeepLab +deeplabv +DeepLabV +DeepLearningExamples +Delangue +DENABLE +denseblock +denselayer +densenet +DenseNet +Demo +deps +DepthwiseConv +dequant +dequantize +DequantizeLinear +DequantStub +DeQuantStub +desc +dest +destructor +detections +detectron +Detectron +dev +devel +Devlin +devtool +DFabiansResUNet +DFS +DGAN +dialogpt +DialoGPT +dicts +dir +dirname +Discrim +distil +Distil +distilbert +DistilBert +DistilBERT +DistilBERT +DistilBertModel +DistilBertTokenizer +distilgpt +DistilGPT +distillated +Distillated +distillating +DistilmBERT +distilrobert +distilroberta +DistilRoBERTa +DistributedDataParallel +DistributedOptimizer +DistributedSampler +distro +dividiti +Djamé +DKFZ +dl +dlabel +dlboost +dlrm +DLRM's +dmlc +DMQA +dNative +dnf +dnn +dnnl +DNNL +Dockerfile +doclist +docstrings +doctrings +docutils +doteq +dowmsampling +downloader +downsampled +downsampling +doxygen +dpn +DPNs +dpr +DropOut +ds +dscore +dst +dtype +DualPathNet +dualpathnetworks +DualPathNetworks +DummyDataLoader +dunet +DUNet +Dupont +Durand +dvdt +dw +dynamiccaly +ead +EAQkaohzrJbd +earlystop +eb +ecotrust +edgetpu +EdgeTPU +edu +ee +eer +ef +efficientnet +efficientNet +EfficientNet +EfficientNets +eg +eightbit +einstein +el +elif +eltwise +emb +embeddings +embs +EMC +enablerepo +EncodeJped +enfr +eng +ensembling +ensp +entrypoint +enum +env +eq +erf +Erf +Éric +eriklindernoren +Errno +esri +et +eval +evaluator +evel +exemplarily +exising +existing +exmaple +expanduser +ExperimentPlanner +ExperimentPlanners +extractive +EzjbRL +fabian +FabiansUNet +facebook +FaceBook +facebookresearch +fairseq +fallbacks +fanout +faq +Farhadi +FashionMNIST +FasterRCNN +FastFormers +fastrcnn +fatihcakirs +favourably +fb +fbgemm +FBNet +fbnetc +fbresnet +FBResNet +fc +fcn +FCN +fd +FeatureExtractor +feedbacks +Feng +ffc +filename +filenames +FileNotFoundError +filepath 
+filesystem +finbert +finetune +Finetune +finetuned +finetuning +flac +FlatMapDataset +flaubert +flavour +flavours +Flavours +floatfunctional +FloatFunctional +FloatTensor +FLOPs +Florian +fmfn +fmt +fmtstr +fn +fname +fns +foregound +fp +FP +fpic +fPIC +fpn +FPN +FRN +FromConfig +frontend +fstack +ftfy +Fu +fullres +func +functionalities +functionet +functools +Funtowicz +fw +FWK +fx +GameAI +GANs +Garnett +gcc +gclient +gd +geffnet +gelu +Gelu +GeluOperator +GenEfficientNet +GenericPreprocessor +german +germeval +GermEval +gestaltit +getitem +getsize +GetStrides +GenAI +GFLOPs +gg +gh +gid +Gimpel +Girshick +github +GitHub +githubusercontent +gitmodules +GLIBCXX +GLOG +GLUE +gluebenchmark +gluepy +gluon +Gluon +gluoncv +GluonCV +gluonnlp +gn +GN +goldsborough +goog +google +googleapis +googleblog +googlenet +googlesource +Goyal +gpg +GPG +gpt +gpu +gpus +GPUs +graphdef +GraphDef +GraphModule +GraphProto +Grauman +grpc +gtFile +gtFine +Gui +Guillaume +Guoming +gz +gzY +Haibin +haibinlin +Haihao +hangzhang +hardcoding +HasAns +hawq +HAWQ +HdQ +heatmaps +Hein +helloworld +HelloWorld +henson +hiddenlayer +hippocampus +Hippocampus +HistogramObserver +hlu +horovod +Horovod +HOROVOD +horovodrun +hostfile +Hounsfield +howpublished +HqEgzS +href +html +http +https +Hu +hubert +huggingface +HuggingFace +HuggingFace's +HuggingFacesTS +hujie +hvd +HybirdBlock +HybridBlock +hyperparameter +hyperparameters +icc +ICCV +Icelake +icpc +icx +ide +idx +ie +IEEE +ILSVR +ilsvrc +ILSVRC +Ilya +im +imagecocodataset +ImageFolder +ImageList +imagenet +ImageNet +ImagenetRaw +ImageRecord +ImageRecordIter +imagesTr +imagesTs +img +imgrec +imgs +imgx +IML +impl +ImportError +IMS +inceptionresnetv +InceptionResNetV +inceptionv +InceptionV +incollection +IndexType +indexValue +indices +indico +inferencer +informations +infos +init +InnerProduct +innersource +inp +inplace +inproceedings +inputcsv +InputData +InputExample +InputFile +Inria +insa +instanceonly +instantiation +integerops +intel 
+intelai +IntelAI +interoperability +introudces +ints +inturn +InvertedResidual +io +ios +iOS +iou +IoU +ipc +ipex +IPEX +ipynb +ipython +ir +irv +ISA +Isensee +isinstance +issuecomment +IssueQuery +IssueQueryThreads +iter +IteratorGetNext +iters +Jäger +jemalloc +Jens +Jie +jim +Jingfei +Jiong +jit +jitter +Joshi +jpeg +JPEGImages +jpg +jpwarren +json +jsons +Julien +JunWang +jupyter +kaggle +kaggleAdDisplayChallenge +kaiming +KaimingHe +Karthik +kcho +keepbs +keepdim +keras +Keskar +keyfile +keypoint +Keypoint +kimiyoung +kitti +kmp +KMP +KnowledgeDistillationLoss +kriz +kwargs +Kyunghyun +LabelShift +labelsTr +Lample +Lan +lang +LanguageModeling +Lapata +Larochelle +LastLayerShape +latencies +LaTeX +Lavin +layernorm +LayerNorm +layoutlm +ld +len +LessEqual +lf +lfaidata +lfs +li +libdeep +libengine +libffi +libGL +libglib +libiomp +libmlperf +librispeech +LibriSpeech +librosa +libsndfile +libstdc +libz +licence +liKE +Limitting +lin +linkopt +linoxide +linux +linuxfoundation +ListDataset +LiTS +Liu +Liu's +llvmlite +lm +LMHeadModel +ln +loadgen +LoadGen +LOADGEN +LoadGen's +LoadgenAPI +LoadgenAPITestSettings +LoadgenVersion +LoadImage +LOC +localdisk +localhost +LOCderiv +LOCpart +logdir +logfile +login +logits +LOGLEVEL +LogSettings +logtostderr +longformer +lossy +Louf +LowPrecisionInferenceTool +lowproposals +lowres +Lp +lpot +LPOT +LPOT's +lr +lS +LSVRC +lt +lua +Luan +lutzroeder +lyon +Lysandre +lzma +macOS +MACOSX +MAdds +Madotto +MagnitudePrunePolicy +Maier +mainpage +Makefile +MakefileGnProj +MakeIterator +Mandar +Manmatha +manylinux +mAp +mAP +Mapillary +marianmt +MaskPostProcessor +maskrcnn +MaskRCNNFPNFeatureExtractor +maskrnn +massa +Massa +matcher +matmul +MatMul +MatMulWithBias +MatMulWithBiasAdd +MatMulWithBiasGelu +MatMulWithBiasTanh +matplotlib +matricses +maxdepth +maxindrange +maxk +MaxPool +maxSizeInComplete +mbart +mBERT +mcc +McCann +mcordts +md +MeanSquaredError +measurer +Medcial +medicaldecathlon +meetup +mem +membind +mems +messi 
+metabuild +metadata +metamind +MICCAI +microsoft +miguelgrinberg +Mingda +minibatch +minilm +minimalistic +minival +minloglevel +minmax +MinMaxObserver +mins +mIoU +mIOU +Mirella +misalignments +miscs +Mish +missmatches +MixedConv +mixnet +MixNet +mixup +mkdir +mkl +MKL +mlap +mlas +MLAS +mlcommons +mll +mlm +mlp +mlpc +mlperf +MLperf +MLPerf +mlt +mmdetection +mmlab +MMLAB +mnasnet +MNASNet +mnist +MNIST +mnli +MNLI +mnZDVAbm +mobilebert +MobileBERT +mobilenet +MobileNet +mobilenetv +Mobilenetv +MobileNetv +MobileNetV +modalities +Modalities +modality +ModelConversion +modelfeatures +modelforward +modelinput +modellogits +modelmean +modelsize +modelstd +ModuleDict +ModuleNotFoundError +Molino +mpi +mrcnn +mrpc +MRPC +MSD +mse +MSE +msvc +mul +mult +multi +Multi +multiclass +multilabel +multinli +MultiNLI +multiscale +MULTISCALE +MultiStream +MultiStream's +MultiStreamFree +mutli +mv +mx +mxnet +MxNet +MXNet +MyDataset +Mykhailo +Myle +MyMetric +myModel +MYTASK +MYTASKNAME +Naman +namedtuple +nanohanno +Narasimhan +NAS +nasnet +NASNet +nasnetalarge +nasnetamobile +nb +nbest +nbsp +nc +NCCL +nchw +NCHW +nd +ndarray +NDArray +nderlu +nepoch +ner +NER +nervanasystems +nesterov +NetEase +netron +Netron +NeurIPS +neval +NewMetric +newstest +nextplatform +ng +ngatang +NGPUS +ngram +NHWC +nifti +niftis +nii +Nijmegen +Nitish +nl +NLG +nli +nll +nlp +NLP +nlpyang +nltk +NLU +nm +nms +nn +nnodes +nnu +nnU +nnunet +nnUnet +nnUNet +nnUNetPlansv +nnUNetTrainer +nnUNetTrainers +nnUNetTrainerV +NNZ +noduplicates +NoisyStudent +Nonlinearity +NonNestedTuple +NoNormalization +NonZero +noobj +np +nproc +npy +npz +nq +nrix +ns +nsdf +nSsKchNAySU +nthreads +ntrain +num +numactl +numba +numCompleteThreads +numerics +numpy +numTest +numTraining +NVAITC +nvcc +nvidia +NVIDIA +NVIDIA's +nvme +Nx +nyu +ok +ol +Omer +OMP +onboarding +oneapi +oneAPI +onednn +oneDNN +onlinedocs +onnx +ONNX +ONNXQuantizer +onnxrt +ONNXRT +onnxruntime +OnnxRuntime +oob +OOM +OOQtYMH +openai +OpenAI +OpenAI's 
+OpenAIAdam +OpenAIGPTModel +OpenAIGPTTokenizer +opencv +OpenCV +openmp +openslr +opensource +openssl +openvino +OpenVINO +openvinotoolkit +OpenWebTextCorpus +OperatorConfig +OPs +opset +opsetid +optim +optimizations +Optimizations +optimizers +Optimizers +optypewise +opwise +OrderedDict +ORGderiv +ORGpart +os +osJJ +OTH +OTHderiv +OTHpart +Ott +oup +outdir +OutputData +outputfile +ov +overfeat +overfit +overfitted +PaddingSequence +PaddingSequence +pageId +palletsprojects +panoptic +Panoptic +paperswithcode +param +params +Parinov +ParseDecodeImagenet +ParseDecodeVoc +participations +Parzen +pastebin +patientIDs +pb +pbar +pdf +Peason +pegasus +pelee +peleenet +PeleeNet +Penghui +Pengxin +pepy +PerChannelMinMaxObserver +PERderiv +perf +perftests +PERpart +phrasebank +phy +physcpubind +PhYUmn +Piero +Pierric +PIL +pixAcc +Piyush +pjreddie +pkill +pkl +pky +plm +PLM +pls +pnasnet +PNASNet +png +POC +polynet +PolyNet +Pooler +pos +postprocesing +postprocess +postprocessed +postprocessing +PostProcessor +PostTransform +PowerTools +pplm +PPLM +PQ +pre +prebuild +prebuilt +Prec +precisions +pred +preds +preformance +Preload +preprint +preprocess +preprocessed +preprocesses +preprocessing +preprocessor +Preprocessor +PreprocessorFor +Preprocessors +prerelease +PreSumm +pretrain +pretrained +pretrainedmodels +pretraining +prev +prioritizies +probs +proc +productizing +profilings +ProgressBar +proto +Protobuf +protoc +protractortest +PRs +PrunePolicy +pth +ptq +PTQ +ptr +pudae +pw +PWC +pwd +PWD +px +py +pybind +pycocotools +pyguide +pylint +pymodule +PyObject +pypi +PyPI +PySUT +pytest +PythonAPI +PYTHONPATH +pytorch +PyTorch +pytorchic +PyTorchKnowledgeDistillationLoss +pyyaml +PyYAML +PZ +qat +QAT +qconfig +QConfig +QiaoranC +qint +qlinear +QLinear +qlinearops +QnA +qnli +QNLI +qps +QPS +qqp +QQP +qscheme +qsl +QSL +qtcreator +qtype +quant +quantile +quantizable +Quantizable +quantization +Quantization +quantize +quantized +QuantizedConv +QuantizedConvReLU 
+QuantizedInput +quantizer +quantizes +Quantizes +quantizing +QuantStub +QueryBackendCapability +QuerySampleComplete +QuerySampleLibrary +quickstart +Quickstart +QuickStart +Quoc +R'emi +Radboud +Radford +Radu +rAjHyXhTzz +rajpurkar +ramdisk +RandAug +RandAugment +randn +RandomCrop +RandomHorizontalFlip +RandomResizedCrop +RandomVerticalFlip +Rault +rc +rcnn +readme +README +ReadmeBuild +ReadmeFAQ +ReadmeHtmlDocs +ReadmeTests +readthedocs +realtime +Realtime +rebase +recommonmark +RecordingObserver +recordio +RecordIO +recurse +Redmon +ReduceMean +regex +RegNet +rehm +Rehm +reinstall +relase +relu +Relu +ReLU +repo +repo's +repo’s +repos +representating +requantize +resampled +resampling +rescale +Rescale +ResencUNet +resize +Resize +ResizeCropImagenet +resized +Resizes +ResizeWithRatio +resnest +ResNest +ResNeSt +resnet +Resnet +ResNet +resnetv +ResNetV +resnext +ResNext +ResNeXt +ressource +ressources +reStructuredText +ret +RetinaMask +retinanet +retinaNet +RetinaNet +reusability +Rewon +rf +rfcn +rgb +RGB +rmax +rmin +RMSE +rn +rng +RNN +rnnt +ro +roberta +RoBERTa +RobertaModel +RobertaTokenizer +ROC +RocStories +Romary +rosanneliu +rougeL +rougeLsum +rowanz +rowspan +RPN +RPNHead +RPNPostProcessor +Rsqrt +rst +rtd +RTX +runhooks +runtime +Runtime +RuntimeError +Rusia +rusiaaman +Ruslan +rw +rwightman +sacremoses +Sagot +Salakhutdinov +salesforce +Salesforce +Salimans +sanh +Sanh +sata +SavedModel +SavedModel +Scalable +scaler +scatterFillKernel +sched +scikit +scm +screenshots +ScriptModule +se +sed +Seddah +seg +segm +SegmentationMask +segmentations +seid +senet +SENet +sentencepiece +Sep +SEP +SeqDataCollator +serializable +ServerPool +sess +setuptools +sexualized +SGD +sgmoid +SHA +sharded +Sharma +Shen +Shirish +shouldn +showEvent +shufflenet +Shufflenet +ShuffleNet +shufflenetv +Shvets +sigmoid +signup +sigopt +Sigopt +SigOpt +SingleStream +skx +Skylake +skylion +SMBO +SMBOs +Smola +smoothes +sndfile +Socher +socio +SocketIO +softmax +somain +Soricut 
+sota +SOTA +sox +SoX +spacings +spacy +SpaCy +SparseCategoricalAccuracy +SparseCategoricalCrossentropy +sparsified +Spearman +spearmanr +specificities +splitted +spm +spnasnet +Sqrt +sqSiUy +Squad +SQuAD +SquadF +squadpy +SquadV +SquaredDifference +squeezebert +squeezenet +SqueezeNet +src +SrcTuple +sryqufw +ssd +SSDMobilenet +sshleifer +sst +stackoverflow +Standley +startswith +StartTest +stdout +stds +stefan +stemblock +stepsize +Stoyanov +str +strided +struct +sts +STS +stsb +styleguide +Suárez +subexpression +subfolder +subfolders +Subgraph +submodule +submodules +Submodules +subsample +subtoken +sudo +Sumanth +summarization +Summarization +SummaryWriter +superseeds +suported +sut +SUT +Sutskever +sv +svg +swagaf +sym +symlink +symlinked +symlinks +Symlinks +synset +sys +SystemUnderTest +tanh +TaskXX +TaskXXX +tb +TBD +tbe +tbody +td +techdecoded +tencent +tensor's +tensorboard +tensorBoard +TensorBoard +tensorcore +TensorDataset +tensorflow +TensorFlow +TensorflowQuery +tensorImageSize +TensorInfo +TensorProto +teraoperations +tesla +testability +TestSettings +tf +TF +TFBertForSequenceClassification +tflite +tfp +tfrecord +TFRecord +TFRecordDataset +tfrecords +TFRobertaModel +TFSlimNetsFactory +TFSlimNetsFactory's +tg +tgt +tgz +th +THCudaTensor +thead +thepath +thres +thrs +Tian +Tidx +timeline +timestamps +TinyBERT +tl +tlkh +tLoss +TLS +tmp +tmpfs +ToArray +ToBGR +toc +toctree +TODO +tokenization +tokenize +tokenized +tokenizer +Tokenizer +tokenizers +Tokenizers +tokenizing +tol +TOL +tolist +toml +ToNDArray +toolchains +ToPILImage +topk +TopK +topologies +ToRange +torchaudio +torchscript +TorchScript +torchtext +torchvision +TorchVision +toronto +totalizing +ToTensor +Toutanova +tp +tpe +TPE +tpu +TPU +tqdm +traceback +trainings +trainval +trainvaltest +transfo +TransformImage +TransfoXLModel +TransfoXLTokenizer +travis +trigram +tstandley +tsv +TuneStrategy +tunings +tuningusage +tuple +tuples +txt +TZ +uber +ubuntu +ubyte +UI +UID +uint +uk +un 
+uncomment +uncompress +unet +Unet +UNet +unidecode +uniq +unittest +unref +unsqueeze +unstack +upenn +uploader +upscaled +Upscaled +upstreamed +url +userspace +usp +usr +util +utils +ux +UX +valminusminival +valset +ValueError +Varshney +VCVTNE +VCVTNEPS +VDPBF +vec +Veronika +veronikayurchuk +versioned +Veselin +vgg +viewpage +Villemonte +ViT +voc +VOC +VOCdevkit +VOCmAP +VOCMApMetrics +VOCRecord +vercel +voxel +voxels +vram +VRAM +VTune +waleedka +Wallach +wangg +warmup +wav +wd +webcam +Webcam +webite +webpage +WebSockets +WebText +wedam +WeightSharedConvolutionalBoxPredictor +Wformat +wget +whitelist +whl +WideResNet +WideResNet +Wightman +wikipedia +wikitext +WikiText +WIP +wmt +wnd +WnD +wnli +Wnxu +WordPiece +workdir +workflow +Workflow +workflows +workspace +wrt +wwm +www +xad +xception +Xception +xcode +xeon +Xeon +Xiang +Xiong +xl +XLA +xlm +XLMModel +XLMTokenizer +xlnet +XLNet +XLNetModel +XLNetTokenizer +XlUH +xml +xnli +XNLI +xsum +xV +xvf +xvzf +XXXX +xxy +xxz +xYNrZdEAnrHk +xywh +xyxy +xz +xzvf +yacs +yaml +yamls +Yi +Yiming +Yinhan +yizhu +yjxiong +YKd +Yoann +yolo +yolov +YOLOv +YOLOV +yosinski +Yosinski +YqgzY +Yuanjun +Yue +Yunpeng +Yurchuk +YY +zenodo +Zettlemoyer +zfnet +ZFNet +zh +zhang +Zhang +zhanghang +Zhenzhong +Zhi +Zhilin +Zhongyue +zhongyuezhang +Zhu +Zihang +zihangdai +znoexecstack +znow +Zptls +zrelro +zrl +zxvf +networkbuilders +MaskRCNN +DDR +LTS +NIC +SSDSC +WLYDCRB +WilsonCity +UTC +AutoDistillation +AutoModelForPreTraining +IntermediateLayersKnowledgeDistillationLoss +advisor +autodistillation +excuting +feedforward +funcs +inorder +intra +matchers +NNCF +baremetal +ContextualWordEmbsForSentenceAug +DataAugmentation +EOS +KeyboardAug +OcrAug +SpellingAug +TextGenerationAug +TextStreamer +assertTrue +cvs +ebcad +makcedward +nlpaug +ocr +BasicMagnitude +DistillationConfig +GroupLasso +IntermediateLayersLoss +KnowledgeLoss +NLPTrainer +NoTrainerOptimizer +PatternLock +PostTrainingDynamic +PostTrainingStatic +PruningConfig 
+QuantizationConfig +baremetal +makcedward +nlpaug +nncf +libexecutor +compressions +aclweb +PrunerConfig +HW +Lecun +NLPToolkit +Yan +exdb +lecun +publis +yann +AutoConfig +AutoTokenizer +Initializer +classcification +english +mdx +ve +BFloat +Gemm +QDQ +desighed +AutoConfig +ExecutorBenchmark +ExecutorBenchmarkArguments +PyTorchBenchmark +PyTorchBenchmarkArguments +engl +english +finetu +mdx +innerproduct +matmul +xlsx +EleutherAI +FAC +MiniLM +QuantizationAwareTraining +bhadresh +conll +echarlaix +ehdwns +gchhablani +lvwerra +maskinuned +moshew +neo +philschmid +prajjwal +pruneofa +samsum +savani +textattack +InferenceResult +MiniLMv +QuaLA +squadv +torchprofile +DynamicLengthConfig +Lentgh +QuestionAnsweringTrainer +RobertaForQuestionAnswering +DynamicLengthConfig +QuaLA +SparseLib +descs +hpp +ker +kern +qa +spmm +comparedwith +tradeoff +hardwares +Kernels +kernels +zhihu +zhuanlan +spmm +tradeoff +InferenceResult +MiniLMv +QuaLA +comparedwith +squadv +torchprofile +multimodal +VNNI +AutoDistillationConfig +KD +Kullback +Leibler +PreTrainedModel +TFAutoModelForSequenceClassification +TFDistillationConfig +TFOptimization +archs +autodistill +makedirs +sloss +criterionoptional +distillationconfig +LeCun +executorbenchmark +pytorchbenchmark +november +Affine +ZeroPoint +protobuf +MLefficiency +cff +kawapanion +mlefficiency +diffusers +dm +lambdalabs +pokemon +sd +sysresearch +AutoModelForSequenceClassification +SapphireRapids +SetFit +scalable +AGNews +Rocketknight +dslim +cointegrated +csarron +huawei +krishna +noah +nas +EvalPrediction +HBM +PyPi +aireferenceimplementations +dtypes +environ +valhalla +BenchmarkConfig +Hanwen +Hengyu +Meng +Wenxin +bo +todo +JIRA +Chatbot +DLSA +NEOX +NeuralChat +PEFT +chatbot +peft +dlsa +cfefb +itrex +bibtex +CompVis +decapoda +ChatFALCON +ChatGPTNEOX +ChatLLAMA +ChatMPT +LLM +MPT +mailto +CODEOWNERS +Cpplint +DCO +cloc +jira +servicedesk +Bas +GHA +clangformat +dco +gtest +inteltf +jenk +mr +sign off +formatScan +Neuralchat 
+PenghuiCheng +VincyZhang +XuehaoSun +airMeng +kevinintel +lkk +luoyu +lvliang +xin +zhenwei +llm +Gaudi +Habana +customizable +mosaicml +mpt +ChatGLM +chatglm +gptj +gptneox +bdec +neuralchat +Livestream +RbKRELWP +youtube +Wxk +ZJkU +AutoModelForCausalLM +WeightOnlyQuantConfig +woq +Acknowledgements +AutoModel +HWs +HellaSwag +LLMs +PIQA +StarCoder +Winogrande +TangoBERT +AWQ +BitsAndBytesConfig +GPTQ +Qlora +RTN +TEQ +optimality +SmoothQuant +approvers +smoothquant +weightonlyquant +Eltociear +Hardik +Huiyan +Ikko +Jiafu +Jiqing +Niroop +QLoRA +Samanway +Sangjune +Xigui +NeelNanda +SPIQ +SmoothQuantConfig +TSModelForCausalLM +cdot +dequantization +dequantized +dq +frac +zp +baichuan +Epoches +FID +GGML +LLMRuntime +LLama +LoRA +PPN +TruthfulQA +chinese +electra +ggml +mc +neox +Aditya +Ammbashankar +Aryaman +Ashimine +Bhargav +Cao +Das +Guskin +Itay +Kamboj +LifengWang +Mamou +Nalamati +Shira +Shirin +Shrestha +Smoothieewastaken +Surav +Titsworth +Tsai +maktukmak +sangjune +ChatBot +Chatbots +Jianyu +VMware +marktechpost +Ahouzi +Anas +Ayaan +Bordoloi +Jogesh +Pyakurel +Rohan +Sandesh +SandeshPyakurel +Soni +Sudhanshu +Tripathi +akarX +dalvishruti +NormalFloat +backpropagates diff --git a/.github/workflows/scripts/formatScan/pydocstyle.sh b/.github/workflows/scripts/formatScan/pydocstyle.sh new file mode 100644 index 000000000..6c2f44ba5 --- /dev/null +++ b/.github/workflows/scripts/formatScan/pydocstyle.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh + +REPO_DIR=/intel-extension-for-transformers +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +pydocstyle --convention=google ${REPO_DIR} >${log_dir}/pydocstyle.log +exit_code=$? 
+ +$BOLD_YELLOW && echo " ----------------- Current pydocstyle cmd start --------------------------" && $RESET +echo "pydocstyle --convention=google ${REPO_DIR} >${log_dir}/pydocstyle.log" +$BOLD_YELLOW && echo " ----------------- Current pydocstyle cmd end --------------------------" && $RESET + +$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" +cat $log_dir/pydocstyle.log +$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET + +if [ ${exit_code} -ne 0 ]; then + $BOLD_RED && echo "Error!! Please Click on the artifact button to download and view error details." && $RESET + exit 1 +fi + +$BOLD_PURPLE && echo "Congratulations, check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." && $RESET +exit 0 diff --git a/.github/workflows/scripts/formatScan/pylint.sh b/.github/workflows/scripts/formatScan/pylint.sh new file mode 100644 index 000000000..ff4e98203 --- /dev/null +++ b/.github/workflows/scripts/formatScan/pylint.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh +cd /intel-extension-for-transformers +$BOLD_YELLOW && echo "---------------- git submodule update --init --recursive -------------" && $RESET +git config --global --add safe.directory "*" +git submodule update --init --recursive + +$BOLD_YELLOW && echo "---------------- install ITREX -------------" && $RESET +export PYTHONPATH=`pwd` +pip list + +cd /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/ +if [ -f "requirements.txt" ]; then + python -m pip install --default-timeout=100 -r requirements.txt + pip list +else + echo "Not found requirements.txt file." 
+fi + +cd /intel-extension-for-transformers +log_dir=/intel-extension-for-transformers/.github/workflows/script/formatScan +if [ -f "requirements.txt" ]; then + python -m pip install --default-timeout=100 -r requirements.txt + pip list +else + echo "Not found requirements.txt file." +fi +# install packages +pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@83dbfbf6070324f3e5872f63e49d49ff7ef4c9b3 +pip install accelerate nlpaug nltk schema optimum-intel==1.11.0 optimum==1.13.3 + +echo "[DEBUG] list pipdeptree..." +pip install pipdeptree +pipdeptree + +python -m pylint -f json --disable=R,C,W,E1129 \ + --enable=line-too-long \ + --max-line-length=120 \ + --extension-pkg-whitelist=numpy,nltk \ + --ignored-classes=TensorProto,NodeProto \ + --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,mxnet,onnx,onnxruntime,neural_compressor,neural_compressor.benchmark,intel_extension_for_transformers.neural_engine_py,cv2,PIL.Image \ + --ignore-paths=/intel-extension-for-transformers/intel_extension_for_transformers/llm/runtime/graph/ \ + /intel-extension-for-transformers/intel_extension_for_transformers >${log_dir}/pylint.json +exit_code=$? + +$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" && $RESET +cat ${log_dir}/pylint.json +$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET + +if [ ${exit_code} -ne 0 ]; then + $BOLD_RED && echo "Error!! Please Click on the artifact button to download and view Pylint error details." && $RESET + exit 1 +fi +$BOLD_PURPLE && echo "Congratulations, Pylint check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." 
&& $RESET
+exit 0
diff --git a/.github/workflows/scripts/formatScan/pyspelling.sh b/.github/workflows/scripts/formatScan/pyspelling.sh
new file mode 100644
index 000000000..940332fab
--- /dev/null
+++ b/.github/workflows/scripts/formatScan/pyspelling.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+WORKING_PATH="/intel-extension-for-transformers"
+for var in "$@"
+do
+  case $var in
+    --WORKING_PATH=*)
+      WORKING_PATH=$(echo $var | cut -f2 -d=);;
+  esac
+done
+
+# Define $BOLD_RED/$BOLD_PURPLE/$LIGHT_PURPLE/$RESET used below; without this
+# they expand to empty strings and the status messages print uncolored.
+source ${WORKING_PATH}/.github/workflows/script/change_color.sh
+
+log_dir=${WORKING_PATH}/.github/workflows/script/formatScan
+VAL_REPO=${WORKING_PATH}/.github/workflows/script/formatScan
+REPO_DIR=${WORKING_PATH}
+
+sed -i "s|\${VAL_REPO}|$VAL_REPO|g" ${VAL_REPO}/pyspelling_conf.yaml
+sed -i "s|\${SCAN_REPO}|$REPO_DIR|g" ${VAL_REPO}/pyspelling_conf.yaml
+echo "Modified config:"
+cat ${VAL_REPO}/pyspelling_conf.yaml
+
+pip install pyspelling
+pyspelling -c ${VAL_REPO}/pyspelling_conf.yaml >${log_dir}/pyspelling.log
+
+exit_code=$?
+if [ ${exit_code} -ne 0 ]; then
+  $BOLD_RED && echo "Pyspelling exited with non-zero exit code." && $RESET
+  exit 1
+fi
+$BOLD_PURPLE && echo "Congratulations, check passed!" && $LIGHT_PURPLE && echo "You can click on the artifact button to see the log details."
&& $RESET +exit 0 diff --git a/.github/workflows/scripts/formatScan/pyspelling_conf.yaml b/.github/workflows/scripts/formatScan/pyspelling_conf.yaml new file mode 100644 index 000000000..6fb64f3f0 --- /dev/null +++ b/.github/workflows/scripts/formatScan/pyspelling_conf.yaml @@ -0,0 +1,13 @@ +matrix: +- name: Markdown + aspell: + lang: en + d: en_US + mode: markdown + dictionary: + wordlists: + - ${VAL_REPO}/nlp_dict.txt + output: ${VAL_REPO}/nlp_dict.dic + sources: + - ${SCAN_REPO}/docs/* + - ${SCAN_REPO}/*.md \ No newline at end of file diff --git a/.github/workflows/scripts/launch_llm.sh b/.github/workflows/scripts/launch_llm.sh new file mode 100644 index 000000000..7e8dc3aeb --- /dev/null +++ b/.github/workflows/scripts/launch_llm.sh @@ -0,0 +1,107 @@ +#!/bin/bash +set -x +set -eo pipefail + +cores_list=(56) +batch_size_list=(1 4) +input_list=(32 512) +output_list=(32 128) +beam_list=(1 4) + +function main() { + conda_env="$1" + model="$2" + working_dir="$3" + log_prefix="$4" + script="${working_dir}/run_llm.py" + precision="$5" + # init params + if [[ "${model}" == "gpt-j-6b" ]] || [[ "${model}" == "gpt-j-6b-pruned" ]]; then + model_name="EleutherAI/gpt-j-6B" + elif [[ "${model}" == "llama-2-7b-chat" ]]; then + model_name="meta-llama/Llama-2-7b-chat-hf" + fi + + # init conda + #. 
$(dirname ${CONDA_EXE})/../etc/profile.d/conda.sh + conda activate $conda_env || source activate $conda_env + + # env + export KMP_BLOCKTIME=1 + export KMP_SETTINGS=1 + export KMP_AFFINITY=granularity=fine,compact,1,0 + export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so:${CONDA_PREFIX}/lib/libtcmalloc.so + export GLOG_minloglevel=2 + + # launch benchmark + for cores_per_instance in ${cores_list[@]}; do + for batch_size in ${batch_size_list[@]}; do + for input in ${input_list[@]}; do + [[ "${input}" == "32" ]] && output=32 || output=128 + #sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches' + logs_file="${model}-${precision}-${cores_per_instance}-${batch_size}-${input}-${output}.log" + ir_path="${working_dir}/${precision}_ir" + python ${WORKING_DIR}/.github/workflows/script/py_task_injection.py --task=get_ITREX_cpu_memory_info --file_name=${script} + if [[ ${precision} == "fp8" ]]; then + export NE_WEIGHT_FP8_4E3M=1 + ir_path="${working_dir}/bf16_ir" + numactl -m 1 -C 56-111 python ${script} --weight_type=fp8_4e3m --input-tokens $input --max-new-tokens $output --batch-size $batch_size --model_path ${ir_path} --model ${model_name} 2>&1 | tee ${WORKING_DIR}/${logs_file} || true + else + numactl -m 1 -C 56-111 python ${script} --input-tokens $input --max-new-tokens $output --batch-size $batch_size --model_path ${ir_path} --model ${model_name} 2>&1 | tee ${WORKING_DIR}/${logs_file} || true + fi + collect_perf_logs_llm ${logs_file} ${precision} + done + done + done + + conda deactivate >/dev/null 2>&1 +} + +function collect_perf_logs_llm { + # latency + log_dir="${WORKING_DIR}/$1" + latency=($(grep -i 'inference latency:' ${log_dir} | sed -e 's/.*atency://;s/[^0-9.]//g;s/\.$//' | awk ' + BEGIN { + num = 0; + sum = 0; + }{ + num ++; + sum += $1; + }END { + if(num > 0) { + printf("%d %.6f", num, sum / num); + }else { + printf("0 0"); + } + } + ')) + input_tokens=$input + max_new_tokens=$output + beam_search=4 + # throughput + throughput=($( + echo | awk -v bs=$batch_size -v 
it=$input -v sec=${latency[1]} -v i=${latency[0]} '{ + if(sec <= 0) { + print "0"; + }else { + printf("%.3f", bs * it / sec * i); + } + }' + ) 0) + # memory usage + used_memory=$(grep 'memory used total:' ${log_dir} | tail -n 1 | head -n 1 | awk '{print $(NF-1)}') + # summary + framework="engine" + mode_name="latency" + precision=$2 + link="${log_prefix}/$1" + printf "${framework},${mode_name},${model},${precision},${batch_size}," | tee -a ${WORKING_DIR}/llm_summary.log + printf "${input_tokens},${max_new_tokens},${beam_search},${used_memory}," | tee -a ${WORKING_DIR}/llm_summary.log + printf "${cores_per_instance},${latency[0]},${throughput[0]},${link} ," | tee -a ${WORKING_DIR}/llm_summary.log + printf "${latency[1]},${first_latency},${avg_latency},${p90_latency},$(hostname)\n" | tee -a ${WORKING_DIR}/llm_summary.log + set +x + echo -e "\n\n-------- Summary --------" + sed -n '1p;$p' ${WORKING_DIR}/llm_summary.log | column -t -s ',' +} + +main $@ 2>&1 | tee ${WORKING_DIR}/launch.log diff --git a/.github/workflows/scripts/prepare_env.sh b/.github/workflows/scripts/prepare_env.sh new file mode 100644 index 000000000..15f45e0c3 --- /dev/null +++ b/.github/workflows/scripts/prepare_env.sh @@ -0,0 +1,10 @@ +cd /intel-extension-for-transformers + +pip install -U pip + +if [ -f "requirements.txt" ]; then + python -m pip install --default-timeout=100 -r requirements.txt + pip list +else + echo "Not found requirements.txt file." 
+fi diff --git a/.github/workflows/scripts/prepare_env_with_conda.sh b/.github/workflows/scripts/prepare_env_with_conda.sh new file mode 100644 index 000000000..3539a10e3 --- /dev/null +++ b/.github/workflows/scripts/prepare_env_with_conda.sh @@ -0,0 +1,18 @@ +cd ${WORKING_DIR} +conda_env_name=$1 +python_version=$2 +if [[ -z "${conda_env_name}" ]] || [[ -z "${python_version}" ]]; then + $BOLD_RED && echo "need provide with conda env name and python version" && $RESET + exit 1 +fi + +conda create -n ${conda_env_name} python=${python_version} -y +source activate ${conda_env_name} || conda activate ${conda_env_name} +pip install -U pip + +if [ -f "requirements.txt" ]; then + python -m pip install --default-timeout=100 -r requirements.txt + pip list +else + echo "Not found requirements.txt file." +fi diff --git a/.github/workflows/scripts/run_unit_test_llmruntime.sh b/.github/workflows/scripts/run_unit_test_llmruntime.sh new file mode 100644 index 000000000..5df652ed4 --- /dev/null +++ b/.github/workflows/scripts/run_unit_test_llmruntime.sh @@ -0,0 +1,38 @@ +#!/bin/bash +source /intel-extension-for-transformers/.github/workflows/script/change_color.sh +test_install_backend="true" +LOG_DIR=/intel-extension-for-transformers/log_dir +mkdir -p ${LOG_DIR} +WORKING_DIR="/intel-extension-for-transformers/intel_extension_for_transformers/llm/runtime/graph" + +# -------------------LLM Runtime Test------------------- +function llmruntime_test() { + cd ${WORKING_DIR} + pip install -r requirements.txt + cd ${WORKING_DIR}/tests + local ut_log_name=${LOG_DIR}/unit_test_llm_runtime.log + find . -name "test*.py" | sed 's,\.\/,python ,g' | sed 's/$/ --verbose/' >run.sh + # run UT + $BOLD_YELLOW && echo "cat run.sh..." 
&& $RESET + cat run.sh | tee ${ut_log_name} + $BOLD_YELLOW && echo "------UT start-------" && $RESET + bash run.sh 2>&1 | tee -a ${ut_log_name} + $BOLD_YELLOW && echo "------UT end -------" && $RESET + + if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || + [ $(grep -c "OK" ${ut_log_name}) == 0 ] || + [ $(grep -c "Segmentation fault" ${ut_log_name}) != 0 ] || + [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || + [ $(grep -c "==ERROR:" ${ut_log_name}) != 0 ]; then + $BOLD_RED && echo "Find errors in engine test, please check the output..." && $RESET + exit 1 + else + $BOLD_GREEN && echo "engine test finished successfully!" && $RESET + fi +} + +function main() { + llmruntime_test +} + +main diff --git a/.github/workflows/unit-test-llmruntime.yml b/.github/workflows/unit-test-llmruntime.yml new file mode 100644 index 000000000..c8fac5592 --- /dev/null +++ b/.github/workflows/unit-test-llmruntime.yml @@ -0,0 +1,94 @@ +name: LLM Runtime Unit Test + +on: + pull_request: + branches: [main] + paths: + - intel_extension_for_transformers/llm/runtime/graph/** + - .github/workflows/unit-test-llmruntime.yml + - .github/workflows/script/unitTest/** + - '!intel_extension_for_transformers/llm/runtime/graph/docs/**' + - '!intel_extension_for_transformers/llm/runtime/graph/README.md' + workflow_dispatch: + +# If there is a new commit, the previous jobs will be canceled +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + DOCKER_CONFIG_NAME: "commonDockerConfig" + REPO_NAME: "intel-extension-for-transformers" + REPO_TAG: "py38" + DOCKER_FILE_NAME: "devel" + CONTAINER_NAME: "utTest" + EXTRA_CONTAINER_NAME: "modelTest" + +jobs: + unit-test: + runs-on: [self-hosted, linux, X64, llmruntime-node] + steps: + - name: Load environment variables + run: cat ~/actions-runner2/.env >> $GITHUB_ENV + + - name: Docker Clean Up + run: | + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; 
then + docker start ${{ env.CONTAINER_NAME }} + echo "remove left files through container ..." + docker exec ${{ env.CONTAINER_NAME }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + if [[ $(docker ps -a | grep -i '${{ env.EXTRA_CONTAINER_NAME }}'$) ]]; then + docker start ${{ env.EXTRA_CONTAINER_NAME }} + echo "remove left files through container ..." + docker exec ${{ env.EXTRA_CONTAINER_NAME }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + fi + + - name: Checkout out Repo + uses: actions/checkout@v3 + with: + submodules: "recursive" + fetch-tags: true + + - name: Docker Build + run: | + docker build -f ${{ github.workspace }}/.github/workflows/docker/${{ env.DOCKER_FILE_NAME }}.dockerfile --build-arg http_proxy="${{ env.HTTP_PROXY }}" --build-arg https_proxy="${{ env.HTTPS_PROXY }}" -t ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} . 
+ + - name: Docker Run + run: | + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; then + docker stop ${{ env.CONTAINER_NAME }} + docker rm -vf ${{ env.CONTAINER_NAME }} || true + fi + docker run -dit --disable-content-trust --privileged --name=${{ env.CONTAINER_NAME }} -v /dev/shm:/dev/shm \ + -e http_proxy="${{ env.HTTP_PROXY }}" \ + -e https_proxy="${{ env.HTTPS_PROXY }}" \ + -v ${{ github.workspace }}:/intel-extension-for-transformers \ + -v /tf_dataset2:/tf_dataset2 \ + -v ~/.cache/oneAPI:/cache \ + ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} + + - name: Env build + run: | + docker exec ${{ env.CONTAINER_NAME }} \ + bash /intel-extension-for-transformers/.github/workflows/script/prepare_env.sh + + - name: Binary build + run: | + docker exec ${{ env.CONTAINER_NAME }} \ + bash -c "cd /intel-extension-for-transformers/.github/workflows/script \ + && bash install_binary.sh" + + - name: Run UT + run: | + docker exec ${{ env.CONTAINER_NAME }} \ + bash -c "cd /intel-extension-for-transformers/.github/workflows/script/unitTest \ + && bash run_unit_test_llmruntime.sh" + + - name: Publish pipeline artifact + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: LLM Runtime Unit Test + path: ${{ github.workspace }}/log_dir/unit_test*.*