[inference][issue-67] support pip install in docker container (intel#81)
* [inference][issue-67] support pip install in docker container

Signed-off-by: Jiafu Zhang <[email protected]>

* [inference][issue-67] support pip install in docker container
add one more pip non-editable test for real user case

Signed-off-by: Jiafu Zhang <[email protected]>

* [inference][issue-67] support pip install in docker container
add one more pip non-editable test for real user case - gpt-j-6b

Signed-off-by: Jiafu Zhang <[email protected]>

* [inference][issue-67] support pip install in docker container
revert unintentional changes

Signed-off-by: Jiafu Zhang <[email protected]>

---------

Signed-off-by: Jiafu Zhang <[email protected]>
jiafuzha authored Jan 23, 2024
1 parent 6e32361 commit fc06deb
Showing 7 changed files with 48 additions and 1 deletion.
2 changes: 2 additions & 0 deletions .github/workflows/workflow_inference.yml
@@ -84,6 +84,8 @@ jobs:
 DF_SUFFIX=".bigdl-cpu"
 elif [[ ${{ matrix.model }} == "llama-2-7b-chat-hf-vllm" ]]; then
 DF_SUFFIX=".vllm"
+elif [[ ${{ matrix.model }} == "gpt-j-6b" ]]; then
+DF_SUFFIX=".cpu_and_deepspeed.pip_non_editable"
 else
 DF_SUFFIX=".cpu_and_deepspeed"
 fi
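For context, DF_SUFFIX presumably selects which Dockerfile the workflow builds for each matrix model; a minimal sketch of that assumed downstream step (the build command and image tag are illustrative, not part of this diff):

# assumed consumption of DF_SUFFIX later in the workflow; tag is hypothetical
docker build -f dev/docker/Dockerfile${DF_SUFFIX} -t llm-on-ray:ci .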
2 changes: 2 additions & 0 deletions MANIFEST.in
@@ -0,0 +1,2 @@
+# with [tool.setuptools] in pyproject.toml, the configs below work in both baremetal and container
+include inference/**/*.yaml
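A quick way to verify that this pattern pulls the YAML configs into a source distribution (a hedged sketch; assumes the `build` package is installed):

# build an sdist and list the YAML files it packages
python -m build --sdist
tar tf dist/*.tar.gz | grep '\.yaml$'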
1 change: 1 addition & 0 deletions dev/docker/Dockerfile.bigdl-cpu
@@ -25,6 +25,7 @@ RUN --mount=type=cache,target=/opt/conda/pkgs conda init bash && \
 conda install python==3.9
 
 COPY ./pyproject.toml .
+COPY ./MANIFEST.in .
 
 RUN mkdir ./finetune && mkdir ./inference

1 change: 1 addition & 0 deletions dev/docker/Dockerfile.cpu_and_deepspeed
@@ -25,6 +25,7 @@ RUN --mount=type=cache,target=/opt/conda/pkgs conda init bash && \
 conda install python==3.9
 
 COPY ./pyproject.toml .
+COPY ./MANIFEST.in .
 
 RUN mkdir ./finetune && mkdir ./inference

38 changes: 38 additions & 0 deletions dev/docker/Dockerfile.cpu_and_deepspeed.pip_non_editable
@@ -0,0 +1,38 @@
# syntax=docker/dockerfile:1
FROM ubuntu:22.04

ENV LANG C.UTF-8

WORKDIR /root/llm-on-ray

RUN --mount=type=cache,target=/var/cache/apt apt-get update -y \
&& apt-get install -y build-essential cmake wget curl git vim htop ssh net-tools \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

ENV CONDA_DIR /opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda
ENV PATH $CONDA_DIR/bin:$PATH

# setup env
SHELL ["/bin/bash", "--login", "-c"]

RUN --mount=type=cache,target=/opt/conda/pkgs conda init bash && \
unset -f conda && \
export PATH=$CONDA_DIR/bin/:${PATH} && \
conda config --add channels intel && \
conda install python==3.9

# copy all checked-out files for the later non-editable pip install
COPY . .

RUN --mount=type=cache,target=/root/.cache/pip pip install .[cpu,deepspeed] -f https://developer.intel.com/ipex-whl-stable-cpu \
-f https://download.pytorch.org/whl/torch_stable.html

RUN ds_report

# Used to invalidate docker build cache with --build-arg CACHEBUST=$(date +%s)
ARG CACHEBUST=1
COPY ./dev/scripts/install-oneapi.sh /tmp
RUN /tmp/install-oneapi.sh
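As the CACHEBUST comment in the Dockerfile notes, the final oneAPI layers can be forcibly rebuilt while everything above them stays cached; an illustrative invocation (image tag is made up):

# refresh only the layers after ARG CACHEBUST; earlier layers remain cached
docker build --build-arg CACHEBUST=$(date +%s) \
  -f dev/docker/Dockerfile.cpu_and_deepspeed.pip_non_editable \
  -t llm-on-ray:pip-non-editable .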
1 change: 1 addition & 0 deletions dev/docker/Dockerfile.vllm
@@ -25,6 +25,7 @@ RUN --mount=type=cache,target=/opt/conda/pkgs conda init bash && \
 conda install -y -c conda-forge python==3.9 gxx=12.3 gxx_linux-64=12.3
 
 COPY ./pyproject.toml .
+COPY ./MANIFEST.in .
 COPY ./dev/scripts/install-vllm-cpu.sh .
 
 RUN mkdir ./finetune && mkdir ./inference
4 changes: 3 additions & 1 deletion pyproject.toml
@@ -73,7 +73,9 @@ ui = [
 ]
 
 [tool.setuptools]
-packages = ["finetune", "inference"]
+# with MANIFEST.in, the configs below work in both baremetal and container
+package-dir = {"inference" = "inference", "finetune" = "finetune"}
+include-package-data = true
 
 [project.urls]
 Repository = "https://github.com/intel/llm-on-ray.git"
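With include-package-data plus MANIFEST.in, a non-editable `pip install .` should place the YAML configs inside site-packages instead of reading them from the source tree. A hypothetical post-install check (assumes `inference` is a regular importable package):

# list YAML files bundled with the installed package
python -c 'import inference, pathlib; print(sorted(pathlib.Path(inference.__file__).parent.rglob("*.yaml")))'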
