diff --git a/eval_scripts/mplug_owl_2/eval_image_quality.py b/eval_scripts/mplug_owl_2/eval_image_quality.py
index 124c109..8da2f62 100644
--- a/eval_scripts/mplug_owl_2/eval_image_quality.py
+++ b/eval_scripts/mplug_owl_2/eval_image_quality.py
@@ -51,22 +51,22 @@ def main(args):
     image_paths = [
+        "../datasets/LIVEC/Images/",
         "../datasets/AGIQA-3K/database/",
         "../datasets/1024x768/",
         "../datasets/SPAQ/",
         "../datasets/FLIVE_Database/database/",
-        "../datasets/LIVEC/Images/",
         "../datasets/CGIQA-6K/database/",
         "../datasets/kadid10k/images/",
     ]
 
     json_prefix = "../datasets/json/"
     jsons = [
+        json_prefix + "livec.json",
         json_prefix + "agi.json",
         json_prefix + "koniq.json",
         json_prefix + "spaq.json",
         json_prefix + "flive.json",
-        json_prefix + "livec.json",
         json_prefix + "cgi.json",
         json_prefix + "kadid.json",
     ]
diff --git a/eval_scripts/mplug_owl_2/eval_video_quality.py b/eval_scripts/mplug_owl_2/eval_video_quality.py
index 32f5619..aa3ac25 100644
--- a/eval_scripts/mplug_owl_2/eval_video_quality.py
+++ b/eval_scripts/mplug_owl_2/eval_video_quality.py
@@ -96,7 +96,7 @@ def main(args):
         images = load_video(image_path + filename)
         llddata["logit_good"] = 0
         llddata["logit_poor"] = 0
-        
+
         for image in images:
             image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda()
diff --git a/model_zoo/README.md b/model_zoo/README.md
index 910a41c..27c271c 100644
--- a/model_zoo/README.md
+++ b/model_zoo/README.md
@@ -21,7 +21,7 @@ If your server is facing poor connection to Huggingface, we provide an alternati
 
 #### Step 1: Download Weights
 
-The links are as follows (WIP):
+The links are as follows:
 
 _Released_:
 
@@ -29,18 +29,15 @@ _Released_:
 - [LLaVA-v1.5-7B (mix)](https://www.modelscope.cn/models/qfuture/llava_v1.5_7b_qinstruct_preview_v0.1), ModelScope-path: `qfuture/llava_v1.5_7b_qinstruct_preview_v0.1`
 - [LLaVA-v1.5-13B (mix)](https://www.modelscope.cn/models/qfuture/llava_v1.5_13b_qinstruct_preview_v0.1), ModelScope-path: `qfuture/llava_v1.5_13b_qinstruct_preview_v0.1`
 - [mPLUG-Owl-2 (mix)](https://www.modelscope.cn/models/qfuture/mplug_owl_2_qinstruct_preview_v0.1), ModelScope-path: `qfuture/mplug_owl_2_qinstruct_preview_v0.1`
-
-
-_Coming Soon_:
-
-- InternLM-XComposer-VL (mix)
+- [InternLM-XComposer-VL (mix)](https://www.modelscope.cn/models/qfuture/internlm_xcomposer_vl_qinstruct_preview_v0.1), ModelScope-path: `qfuture/internlm_xcomposer_vl_qinstruct_preview_v0.1`
 
 To use them, you need to install `Git LFS` and then clone the repositories directly from ModelScope, under the main directory of Q-Instruct.
 
 ```shell
-git clone https://www.modelscope.cn/models/qfuture/$MODEL_NAME_qinstruct_preview_v0.1.git
+git clone https://www.modelscope.cn/qfuture/$MODEL_NAME_qinstruct_preview_v0.1.git
 ```
+
 #### Step 2: Redirect the Model Paths to Your Local Directory
 
 After that, modify the `model_path` in [quick start](../README.md#quick-start) to the local path (i.e. `$MODEL_NAME_qinstruct_preview_v0.1`) to smoothly load the weights downloaded from ModelScope.