
Commit

Update otter_image_incontext.py
Luodian authored Jul 31, 2023
1 parent a8e11a7 commit 5e949c6
Showing 1 changed file with 1 addition and 3 deletions.
pipeline/demo/otter_image_incontext.py: 4 changes (1 addition & 3 deletions)
@@ -79,15 +79,13 @@ def get_response(image_list, prompt: str, model=None, image_processor=None, in_c
     lang_x_input_ids = lang_x["input_ids"]
     lang_x_attention_mask = lang_x["attention_mask"]
 
-    bad_words_id = model.text_tokenizer(["User:", "GPT1:", "GFT:", "GPT:"], add_special_tokens=False).input_ids
     generated_text = model.generate(
         vision_x=vision_x.to(model.device),
         lang_x=lang_x_input_ids.to(model.device),
         attention_mask=lang_x_attention_mask.to(model.device),
         max_new_tokens=512,
         num_beams=3,
         no_repeat_ngram_size=3,
-        bad_words_ids=bad_words_id,
     )
     parsed_output = (
         model.text_tokenizer.decode(generated_text[0])
@@ -115,7 +113,7 @@ def get_response(image_list, prompt: str, model=None, image_processor=None, in_c
         precision["torch_dtype"] = torch.float16
     elif load_bit == "fp32":
         precision["torch_dtype"] = torch.float32
-    model = OtterForConditionalGeneration.from_pretrained("luodian/OTTER-9B-LA-InContext", device_map="sequential", **precision)
+    model = OtterForConditionalGeneration.from_pretrained("luodian/OTTER-Image-MPT7B", device_map="sequential", **precision)
     model.text_tokenizer.padding_side = "left"
     tokenizer = model.text_tokenizer
     image_processor = transformers.CLIPImageProcessor()
