diff --git a/gallery/index.yaml b/gallery/index.yaml
index 66faea5f6a1..d091d5a2658 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -831,6 +831,33 @@
     - filename: Llava_1.5_Llama3_mmproj_updated.gguf
       sha256: 4f2bb77ca60f2c932d1c6647d334f5d2cd71966c19e850081030c9883ef1906c
       uri: https://huggingface.co/ChaoticNeutrals/LLaVA-Llama-3-8B-mmproj-Updated/resolve/main/llava-v1.5-8B-Updated-Stop-Token/mmproj-model-f16.gguf
+- !!merge <<: *llama3
+  name: "bunny-llama-3-8b-v"
+  urls:
+    - https://huggingface.co/BAAI/Bunny-Llama-3-8B-V-gguf
+  description: |
+    Bunny is a family of lightweight but powerful multimodal models. It offers multiple plug-and-play vision encoders, like EVA-CLIP, SigLIP and language backbones, including Llama-3-8B, Phi-1.5, StableLM-2, Qwen1.5, MiniCPM and Phi-2. To compensate for the decrease in model size, we construct more informative training data by curated selection from a broader data source.
+
+    We provide Bunny-Llama-3-8B-V, which is built upon SigLIP and Llama-3-8B-Instruct. More details about this model can be found in GitHub.
+  icon: https://huggingface.co/BAAI/Bunny-Llama-3-8B-V-gguf/resolve/main/icon.png
+  tags:
+    - llm
+    - multimodal
+    - gguf
+    - gpu
+    - llama3
+    - cpu
+  overrides:
+    mmproj: Bunny-Llama-3-8B-Q4_K_M-mmproj.gguf
+    parameters:
+      model: Bunny-Llama-3-8B-Q4_K_M.gguf
+  files:
+    - filename: Bunny-Llama-3-8B-Q4_K_M-mmproj.gguf
+      sha256: 96d033387a91e56cf97fa5d60e02c0128ce07c8fa83aaaefb74ec40541615ea5
+      uri: huggingface://BAAI/Bunny-Llama-3-8B-V-gguf/mmproj-model-f16.gguf
+    - filename: Bunny-Llama-3-8B-Q4_K_M.gguf
+      sha256: 88f0a61f947dbf129943328be7262ae82e3a582a0c75e53544b07f70355a7c30
+      uri: huggingface://BAAI/Bunny-Llama-3-8B-V-gguf/ggml-model-Q4_K_M.gguf
 - !!merge <<: *llama3
   name: "llava-llama-3-8b-v1_1"
   description: |