Commit

chore(llm): bump tgi to 2.3.0
HoKim98 committed Sep 26, 2024
1 parent c25bf3c commit 7420005
Showing 2 changed files with 204 additions and 2 deletions.
199 changes: 199 additions & 0 deletions templates/llm-a10.yaml
@@ -0,0 +1,199 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llm-a10
  namespace: api
  labels:
    app: llm
    base: huggingface-hub
    gpu: nvidia-a10
    model: llama-3-70b
spec:
  selector:
    matchLabels:
      app: llm
      base: huggingface-hub
      gpu: nvidia-a10
      model: llama-3-70b
  strategy:
    type: Recreate # reuse the GPUs of the old pod instead of double-allocating them
  template:
    metadata:
      annotations:
        instrumentation.opentelemetry.io/inject-sdk: "true"
      labels:
        app: llm
        base: huggingface-hub
        gpu: nvidia-a10
        model: llama-3-70b
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: nvidia.com/gpu.product
                    operator: In
                    values:
                      - NVIDIA-A10
      containers:
        - name: tgi
          image: ghcr.io/huggingface/text-generation-inference:2.3.0
          imagePullPolicy: Always
          args:
            - --otlp-endpoint
            - $(OTEL_EXPORTER_OTLP_ENDPOINT)
          env:
            # - name: CORS_ALLOW_ORIGIN
            #   value: "*"
            # Specify the batch sizes to compute CUDA graphs for.
            # Use "0" to disable.
            # Default = "1,2,4,8,16,32"
            # - name: CUDA_GRAPHS
            #   value: "0"
            # - name: DTYPE
            #   value: float16
            - name: MAX_TOTAL_TOKENS
              # value: "131072"
              value: "4096"
            - name: MODEL_ID
              # value: yanolja/EEVE-Korean-Instruct-10.8B-v1.0
              # value: beomi/Llama-3-Open-Ko-8B-Instruct-preview
              value: meta-llama/Meta-Llama-3.1-70B-Instruct
            - name: PORT
              value: "8080"
            # Possible values:
            # - awq: 4 bit quantization. Requires a specific AWQ quantized model: <https://hf.co/models?search=awq>. Should replace GPTQ models wherever possible because of the better latency
            # - eetq: 8 bit quantization, doesn't require a specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from <https://github.com/NetEase-FuXi/EETQ.git>
            # - gptq: 4 bit quantization. Requires a specific GPTQ quantized model: <https://hf.co/models?search=gptq>. llm-inference will use exllama (faster) kernels wherever possible, and use the triton kernel (wider support) when it's not. AWQ has faster kernels
            # - bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
            # - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
            # - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases, but maybe this one has better perplexity performance for your model
            # - fp8: [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above. This dtype has native ops and should be the fastest if available. This is currently not the fastest because of local unpacking + padding to satisfy matrix multiplication limitations
            # - name: QUANTIZE
            #   value: eetq
            - name: RUST_LOG
              value: INFO
            - name: SHARDED
              value: "true"
            # - name: TRUST_REMOTE_CODE
            #   value: "false"
          resources:
            limits:
              # cpu: "8"
              # memory: "300Gi"
              nvidia.com/gpu: "8"
          # FIXME: add readiness & liveness checks
          # FIXME: See https://github.com/huggingface/text-generation-inference/blob/8511669cb29115bdf0bc2da5328e69d041030996/router/src/server.rs#L128
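          # A minimal sketch for the missing probes, assuming the TGI router
          # still serves `GET /health` on the same port (as in the server.rs
          # linked above); the timings below are illustrative only:
          # startupProbe:
          #   httpGet:
          #     path: /health
          #     port: http
          #   failureThreshold: 120 # allow plenty of time to download and shard the weights
          #   periodSeconds: 10
          # readinessProbe:
          #   httpGet:
          #     path: /health
          #     port: http
          #   periodSeconds: 10
          # livenessProbe:
          #   httpGet:
          #     path: /health
          #     port: http
          #   periodSeconds: 30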
          ports:
            - name: http
              protocol: TCP
              containerPort: 8080
          volumeMounts:
            - name: cache
              mountPath: /data
            - name: dshm
              mountPath: /dev/shm
            - name: huggingface-hub-token
              mountPath: /root/.cache/huggingface/token
              subPath: token
      volumes:
        - name: cache
          persistentVolumeClaim:
            claimName: huggingface-hub-cache
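        # /dev/shm is likely enlarged here because sharding the model across
        # GPUs (SHARDED=true) uses NCCL, which needs more shared memory than
        # the container default provides.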
        - name: dshm
          emptyDir:
            medium: Memory
            sizeLimit: 16Gi
        - name: huggingface-hub-token
          secret:
            secretName: huggingface-hub
            items:
              - key: HUGGING_FACE_HUB_TOKEN
                path: token
---
apiVersion: v1
kind: Service
metadata:
  name: llm
  namespace: api
  labels:
    app: llm
    model: llama-3-70b
spec:
  type: ClusterIP
  selector:
    app: llm
    model: llama-3-70b
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: llm
  namespace: api
  annotations:
    cert-manager.io/cluster-issuer: mobilex.kr
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: 20M
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
spec:
  ingressClassName: mobilex.kr
  tls:
    - hosts:
        - api.mobilex.kr
      secretName: api.mobilex.kr-certs
  rules:
    - host: api.mobilex.kr
      http:
        paths:
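          # /generate is TGI's native generation endpoint; /v1/chat covers the
          # OpenAI-compatible Messages API (/v1/chat/completions).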
          - path: /generate
            pathType: Prefix
            backend:
              service:
                name: llm
                port:
                  number: 80
          - path: /v1/chat
            pathType: Prefix
            backend:
              service:
                name: llm
                port:
                  number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: llm-chatgpt
  namespace: api
  annotations:
    cert-manager.io/cluster-issuer: mobilex.kr
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: 20M
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/rewrite-target: /v1/chat/$2
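    # e.g. a request to /chat/completions is rewritten to /v1/chat/completions
    # before it reaches the pod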
spec:
  ingressClassName: mobilex.kr
  tls:
    - hosts:
        - api.mobilex.kr
      secretName: api.mobilex.kr-certs
  rules:
    - host: api.mobilex.kr
      http:
        paths:
          - path: /chat(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: llm
                port:
                  number: 80
7 changes: 5 additions & 2 deletions templates/llm.yaml
@@ -16,6 +16,8 @@ spec:
       base: huggingface-hub
       gpu: nvidia-a100
       model: llama-3-70b
+  strategy:
+    type: Recreate # reuse the GPUs of the old pod instead of double-allocating them
   template:
     metadata:
       annotations:
@@ -37,7 +39,7 @@
             - NVIDIA-A100-SXM4-40GB
       containers:
         - name: tgi
-          image: ghcr.io/huggingface/text-generation-inference:2.2.0
+          image: ghcr.io/huggingface/text-generation-inference:2.3.0
           imagePullPolicy: Always
           args:
             - --otlp-endpoint
@@ -53,7 +55,8 @@ spec:
             # - name: DTYPE
             #   value: float16
             - name: MAX_TOTAL_TOKENS
-              value: "131072"
+              # value: "131072"
+              value: "4096"
             - name: MODEL_ID
               # value: yanolja/EEVE-Korean-Instruct-10.8B-v1.0
               # value: beomi/Llama-3-Open-Ko-8B-Instruct-preview