erwannd committed
Commit 4decb51
1 Parent(s): b975883

Create app.py

Files changed (1): app.py (+80 −0)
app.py ADDED
import gradio as gr
import spaces
import time
from threading import Thread

import torch
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
from transformers import TextIteratorStreamer
from peft import PeftModel


base_model = "llava-hf/llava-v1.6-mistral-7b-hf"
finetune_repo = "erwannd/llava-v1.6-mistral-7b-finetune-combined4k"

processor = LlavaNextProcessor.from_pretrained(base_model)

# Load the base model in fp16, then attach the LoRA adapter from the finetune repo.
model = LlavaNextForConditionalGeneration.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)
model = PeftModel.from_pretrained(model, finetune_repo)
model.to("cuda:0")


@spaces.GPU
def predict(image, input_text):
    image = image.convert("RGB")
    prompt = f"[INST] <image>\n{input_text} [/INST]"

    inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)

    # Run generate() on a worker thread so the streamer can be consumed
    # here while generation is still in progress.
    streamer = TextIteratorStreamer(processor, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=200, do_sample=False)
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # With special tokens skipped, the decoded stream begins with the prompt
    # minus the <image> placeholder; strip that prefix before yielding.
    text_prompt = f"[INST] \n{input_text} [/INST]"

    buffer = ""
    time.sleep(0.5)
    for new_text in streamer:
        buffer += new_text
        generated_text_without_prompt = buffer[len(text_prompt):]
        time.sleep(0.04)
        yield generated_text_without_prompt


# Non-streaming alternative:
# prompt_length = inputs['input_ids'].shape[1]
# generate_ids = model.generate(**inputs, max_new_tokens=512)
# output_text = processor.batch_decode(generate_ids[:, prompt_length:], skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
# return output_text


image = gr.components.Image(type="pil")
input_prompt = gr.components.Textbox(label="Input Prompt")
model_output = gr.components.Textbox(label="Model Output")
examples = [
    ["./examples/bar_m01.png", "Evaluate and explain if this chart is misleading"],
    ["./examples/bar_n01.png", "Is this chart misleading? Explain"],
    ["./examples/fox_news_cropped.png", "Tell me if this chart is misleading"],
    ["./examples/line_m01.png", "Explain if this chart is misleading"],
    ["./examples/line_m04.png", "Evaluate and explain if this chart is misleading"],
    ["./examples/pie_m01.png", "Evaluate if this chart is misleading, if so explain"],
    ["./examples/pie_m02.png", "Is this chart misleading? Explain"],
]

title = "LlavaNext finetuned on Misleading Chart Dataset"
interface = gr.Interface(
    fn=predict,
    inputs=[image, input_prompt],
    outputs=model_output,
    examples=examples,
    title=title,
    theme="gradio/soft",
    cache_examples=False,
)

interface.launch()
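For a quick local check outside Spaces, predict() can be driven directly instead of launching the UI. A minimal sketch, assuming a CUDA GPU is available, that @spaces.GPU acts as a pass-through outside the Spaces runtime, and that the example image path from the list above exists; the import of app is hypothetical and presumes interface.launch() is guarded or removed so the module can be imported without blocking:

# smoke_test.py -- hypothetical local check, run in place of interface.launch()
from PIL import Image

from app import predict  # assumes app.py is importable without launching the UI

img = Image.open("./examples/bar_m01.png")  # path taken from the examples list
answer = ""
for partial in predict(img, "Is this chart misleading? Explain"):
    answer = partial  # each yield is the cumulative answer so far
print(answer)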