Add new test cases for VisualQnA (#712)
* Add new test cases for VisualQnA

Signed-off-by: lvliang-intel <[email protected]>
lvliang-intel committed Sep 4, 2024
1 parent 9cf1d88 commit 995a62c
Showing 7 changed files with 376 additions and 75 deletions.
24 changes: 24 additions & 0 deletions VisualQnA/docker/docker_build_compose.yaml
@@ -0,0 +1,24 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  visualqna:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/visualqna:${TAG:-latest}
  visualqna-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile
    extends: visualqna
    image: ${REGISTRY:-opea}/visualqna-ui:${TAG:-latest}
  llm-visualqna-tgi:
    build:
      context: GenAIComps
      dockerfile: comps/lvms/Dockerfile_tgi
    extends: visualqna
    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
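For reference, the new test script below consumes this file roughly as follows (a sketch; it assumes GenAIComps has been cloned into VisualQnA/docker, which the test script does itself):

```bash
# Clone the comps repo used as the build context for the lvm-tgi image,
# then build the three services defined above (REGISTRY/TAG default to opea/latest).
git clone https://github.com/opea-project/GenAIComps.git
docker compose -f docker_build_compose.yaml build visualqna visualqna-ui llm-visualqna-tgi --no-cache
```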
9 changes: 2 additions & 7 deletions VisualQnA/docker/gaudi/README.md
@@ -19,15 +19,10 @@ cd GenAIComps
 docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/Dockerfile_tgi .
 ```

-### 3. Build TGI Gaudi Image
-
-Since TGI Gaudi does not yet support llava-next on its main branch, we'll need to build it from a PR branch for now.
+### 3. Pull TGI Gaudi Image

 ```bash
-git clone https://github.com/huggingface/tgi-gaudi.git
-cd tgi-gaudi/
-docker build -t opea/llava-tgi:latest .
-cd ../
+docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
 ```

 ### 4. Build MegaService Docker Image
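For a quick standalone sanity check of the pulled serving image (a sketch, not part of this change; the device flags follow the usual tgi-gaudi container conventions and the model ID matches the one the tests export below, so verify both for your host):

```bash
# Launch TGI-Gaudi serving llava-v1.6 on port 8399; the Habana runtime
# flags below are the conventional container settings (assumed).
docker run -p 8399:80 --runtime=habana -e HABANA_VISIBLE_DEVICES=all \
    -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice \
    --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.4 \
    --model-id llava-hf/llava-v1.6-mistral-7b-hf
```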
2 changes: 1 addition & 1 deletion VisualQnA/docker/gaudi/compose.yaml
@@ -3,7 +3,7 @@

 services:
   llava-tgi-service:
-    image: ${REGISTRY:-opea}/llava-tgi:${TAG:-latest}
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.4
     container_name: tgi-llava-gaudi-server
     ports:
       - "8399:80"
2 changes: 1 addition & 1 deletion VisualQnA/docker/xeon/compose.yaml
@@ -19,7 +19,7 @@ services:
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --cuda-graphs 0
   lvm-tgi:
     image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi-server
+    container_name: lvm-tgi-xeon-server
     depends_on:
       - llava-tgi-service
     ports:
66 changes: 0 additions & 66 deletions VisualQnA/tests/test_basic_inference.sh

This file was deleted.

174 changes: 174 additions & 0 deletions VisualQnA/tests/test_visualqna_on_gaudi.sh
@@ -0,0 +1,174 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="visualqna visualqna-ui llm-visualqna-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker/gaudi

    export LVM_MODEL_ID="llava-hf/llava-v1.6-mistral-7b-hf"
    export LVM_ENDPOINT="http://${ip_address}:8399"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export LVM_SERVICE_PORT=9399
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LVM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/visualqna"

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs lvm-tgi-gaudi-server > ${LOG_PATH}/lvm_tgi_service_start.log
        if grep -q Connected ${LOG_PATH}/lvm_tgi_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

function validate_services() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 1s
}

function validate_microservices() {
    # Check if the microservices are running correctly.

    # lvm microservice
    validate_services \
        "${ip_address}:9399/v1/lvm" \
        "The image" \
        "lvm-tgi" \
        "lvm-tgi-gaudi-server" \
        '{"image": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "prompt":"What is this?"}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_services \
        "${ip_address}:8888/v1/visualqna" \
        "The image" \
        "visualqna-gaudi-backend-server" \
        "visualqna-gaudi-backend-server" \
        '{
        "messages": [
          {
            "role": "user",
            "content": [
              {
                "type": "text",
                "text": "What'\''s in this image?"
              },
              {
                "type": "image_url",
                "image_url": {
                  "url": "https://www.ilankelman.org/stopsigns/australia.jpg"
                }
              }
            ]
          }
        ],
        "max_tokens": 300
    }'
}
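# For manual debugging, the same megaservice request can be replayed with curl
# (a sketch; assumes the gateway from compose.yaml is listening on port 8888):
#   curl http://${ip_address}:8888/v1/visualqna -H 'Content-Type: application/json' \
#     -d '{"messages":[{"role":"user","content":[{"type":"text","text":"What is in this image?"},{"type":"image_url","image_url":{"url":"https://www.ilankelman.org/stopsigns/australia.jpg"}}]}],"max_tokens":300}'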

function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    cd $WORKPATH/docker/gaudi
    docker compose stop && docker compose rm -f
}

function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices
    validate_megaservice
    #validate_frontend

    stop_docker
    echo y | docker system prune

}

main
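To run this test end to end outside CI, a minimal invocation looks like the following (a sketch; it assumes a Gaudi host with Docker Compose and a valid Hugging Face token, and relies on WORKPATH being derived from the tests directory):

```bash
export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
cd VisualQnA/tests
bash test_visualqna_on_gaudi.sh
```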