TobyYang7 committed
Commit ca317b2
1 Parent(s): ee668ff

Update app.py

Files changed (1)
  app.py +41 -93
app.py CHANGED
@@ -1,104 +1,52 @@
-import time
-from threading import Thread
-
 import gradio as gr
-import torch
+from llava_llama3.serve.cli import chat_llava
+from llava_llama3.model.builder import load_pretrained_model
 from PIL import Image
-from transformers import AutoProcessor, LlavaForConditionalGeneration
-from transformers import TextIteratorStreamer
-
-import spaces
-
-
-PLACEHOLDER = """
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-    <img src="https://cdn-uploads.huggingface.co/production/uploads/64ccdc322e592905f922a06e/DDIW0kbWmdOQWwy4XMhwX.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;">
-    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">LLaVA-Llama-3-8B</h1>
-    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Llava-Llama-3-8b is a LLaVA model fine-tuned from Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner</p>
-</div>
-"""
-
-
-model_id = "TheFinAI/FinLLaVA"
-
-processor = AutoProcessor.from_pretrained(model_id)
+import torch

-model = LlavaForConditionalGeneration.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16,
-    low_cpu_mem_usage=True,
+model_path = "TheFinAI/FinLLaVA"
+device = "cuda"
+conv_mode = "llama_3"
+temperature = 0
+max_new_tokens = 512
+load_8bit = False
+load_4bit = False
+
+tokenizer, llava_model, image_processor, context_len = load_pretrained_model(
+    model_path,
+    None,
+    'llava_llama3',
+    load_8bit,
+    load_4bit,
+    device=device
 )

-model.to("cuda:0")
-model.generation_config.eos_token_id = 128009
-
-
-@spaces.GPU
-def bot_streaming(message, history):
-    print(message)
-    if message["files"]:
-        # message["files"][-1] is a Dict or just a string
-        if type(message["files"][-1]) == dict:
-            image = message["files"][-1]["path"]
-        else:
-            image = message["files"][-1]
-    else:
-        # if there's no image uploaded for this turn, look for images in the past turns
-        # kept inside tuples, take the last one
-        for hist in history:
-            if type(hist[0]) == tuple:
-                image = hist[0][0]
-    try:
-        if image is None:
-            # Handle the case where image is None
-            gr.Error("You need to upload an image for LLaVA to work.")
-    except NameError:
-        # Handle the case where 'image' is not defined at all
-        gr.Error("You need to upload an image for LLaVA to work.")
-
-    prompt = f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"prompt: {prompt}")
-    image = Image.open(image)
-    inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
-
-    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": False, "skip_prompt": True})
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False)
-
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
-
-    text_prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"text_prompt: {text_prompt}")
-
-    buffer = ""
-    time.sleep(0.5)
-    for new_text in streamer:
-        # find <|eot_id|> and remove it from the new_text
-        if "<|eot_id|>" in new_text:
-            new_text = new_text.split("<|eot_id|>")[0]
-        buffer += new_text
-
-        # generated_text_without_prompt = buffer[len(text_prompt):]
-        generated_text_without_prompt = buffer
-        # print(generated_text_without_prompt)
-        time.sleep(0.06)
-        # print(f"new_text: {generated_text_without_prompt}")
-        yield generated_text_without_prompt
-
+def predict(image, text):
+    output = chat_llava(
+        args=None,
+        image_file=image,
+        text=text,
+        tokenizer=tokenizer,
+        model=llava_model,
+        image_processor=image_processor,
+        context_len=context_len
+    )
+    return output

-chatbot=gr.Chatbot(placeholder=PLACEHOLDER,scale=1)
+chatbot = gr.Chatbot(placeholder=PLACEHOLDER, scale=1)
 chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
-with gr.Blocks(fill_height=True, ) as demo:
+
+with gr.Blocks(fill_height=True) as demo:
     gr.ChatInterface(
-        fn=bot_streaming,
-        title="LLaVA Llama-3-8B",
-        examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
-                  {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
-        description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
-        stop_btn="Stop Generation",
-        multimodal=True,
-        textbox=chat_input,
-        chatbot=chatbot,
+        fn=predict,
+        title="FinLLaVA",
+        examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
+                  {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
+        description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
+        stop_btn="Stop Generation",
+        multimodal=True,
+        textbox=chat_input,
+        chatbot=chatbot,
     )

 demo.queue(api_open=False)
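
As committed, the new app.py leaves two loose ends: `chatbot = gr.Chatbot(placeholder=PLACEHOLDER, scale=1)` still references PLACEHOLDER, whose definition is deleted in this diff, and `fn=predict` points at a function with an `(image, text)` signature, while `gr.ChatInterface` with `multimodal=True` calls its `fn` with `(message, history)`, where `message` is a dict of text and files. Below is a minimal sketch of a compatible version, reusing the names loaded above and assuming `chat_llava` accepts a plain file path for `image_file` (as the committed `predict` implies); the PLACEHOLDER stub and the wrapper body are illustrative, not part of this commit:

# Illustrative sketch, not part of this commit: restore a PLACEHOLDER stub
# and match the (message, history) signature gr.ChatInterface expects
# when multimodal=True.
PLACEHOLDER = "<h1 style='text-align: center; opacity: 0.55;'>FinLLaVA</h1>"  # stub for the deleted HTML banner

def predict(message, history):
    # gr.MultimodalTextbox submits {"text": "...", "files": [...]}.
    if not message["files"]:
        raise gr.Error("You need to upload an image for FinLLaVA to work.")
    return chat_llava(
        args=None,
        image_file=message["files"][-1],  # assumed to be a file path
        text=message["text"],
        tokenizer=tokenizer,
        model=llava_model,
        image_processor=image_processor,
        context_len=context_len,
    )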