letianWoowoof commited on
Commit
ce3a807
·
verified ·
1 Parent(s): d07b293

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -52
app.py CHANGED
@@ -1,64 +1,103 @@
 
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("letianWoowoof/woowoofv1")
8
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
 
 
 
 
 
 
25
 
26
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
27
 
28
- response = ""
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
41
 
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
62
 
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from threading import Thread
import time

import gradio as gr
from gradio import FileData
import requests
import spaces
import torch
from PIL import Image
from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer

# Model checkpoint on the Hugging Face Hub.
# BUG FIX: `ckpt` was previously an empty string, so `from_pretrained(ckpt)`
# failed at startup; the code then re-loaded the processor and model a second
# time via `AutoModelForPreTraining`, a name that was never imported (NameError)
# and whose CPU-resident model would have shadowed the CUDA one anyway.
# Load the intended checkpoint exactly once instead.
ckpt = "letianWoowoof/woowoofv1"
model = MllamaForConditionalGeneration.from_pretrained(
    ckpt,
    torch_dtype=torch.bfloat16,  # halves memory vs fp32; fine on recent GPUs
).to("cuda")
processor = AutoProcessor.from_pretrained(ckpt)
18
 
19
@spaces.GPU
def bot_streaming(message, history, max_new_tokens=250):
    """Stream a chat completion for a (possibly multimodal) user message.

    Args:
        message: Gradio MultimodalTextbox payload — a dict with keys
            ``"text"`` (str) and ``"files"`` (list of uploaded files).
        history: Chat history in gradio tuples format. An image upload
            occupies its own history row as a ``(filepath,)`` tuple in the
            user slot; the accompanying text lives in the NEXT row.
        max_new_tokens: Generation budget forwarded to ``model.generate``.

    Yields:
        str: The partial response text, growing as tokens stream in.
    """
    txt = message["text"]

    messages = []
    images = []

    # Rebuild the chat-template input from the gradio history. Image rows
    # consume history[i + 1] as well (the text that accompanies the image),
    # so the matching text row is skipped on its own iteration.
    for i, msg in enumerate(history):
        if isinstance(msg[0], tuple):
            # BUG FIX: previously raised IndexError when an image row was the
            # last history entry — guard the history[i + 1] lookup.
            if i + 1 < len(history):
                messages.append({"role": "user", "content": [{"type": "text", "text": history[i + 1][0]}, {"type": "image"}]})
                messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i + 1][1]}]})
                images.append(Image.open(msg[0][0]).convert("RGB"))
        elif isinstance(history[i - 1], tuple) and isinstance(msg[0], str):
            # Text row already folded into the preceding image row above.
            # NOTE(review): this tests the ROW's type, not history[i - 1][0];
            # looks intentional for gradio's tuple rows — confirm against the
            # gradio version in use.
            pass
        elif isinstance(history[i - 1][0], str) and isinstance(msg[0], str):
            # Plain text-only turn: user message plus assistant reply.
            messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
            messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})

    # Append the current user message (with its image, if one was attached).
    if len(message["files"]) == 1:
        if isinstance(message["files"][0], str):  # examples pass a plain path
            image = Image.open(message["files"][0]).convert("RGB")
        else:  # regular uploads pass a FileData-style dict
            image = Image.open(message["files"][0]["path"]).convert("RGB")
        images.append(image)
        messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
    else:
        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})

    texts = processor.apply_chat_template(messages, add_generation_prompt=True)

    if images == []:
        inputs = processor(text=texts, return_tensors="pt").to("cuda")
    else:
        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")

    # Run generation on a background thread and stream decoded tokens out.
    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)

    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)  # small pause smooths the UI streaming cadence
        yield buffer
 
 
 
 
 
 
 
74
 
75
 
76
# Gradio chat UI: a multimodal textbox (text + image upload) streamed through
# `bot_streaming`.  Each example row pre-fills the message dict and the
# max-new-tokens slider value.
demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
    [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]},
     200],
    [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]},
     250],
    [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]},
     250],
    [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]},
     250],
    [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]},
     250],
    ],
    textbox=gr.MultimodalTextbox(),
    additional_inputs=[gr.Slider(
        minimum=10,
        maximum=500,
        value=250,
        step=10,
        label="Maximum number of new tokens to generate",
    )
    ],
    cache_examples=False,
    description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
    stop_btn="Stop Generation",
    fill_height=True,
    multimodal=True)

# BUG FIX: the launch was previously unconditional at import time; restore the
# __main__ guard (present in the prior revision) so the module can be imported
# without side effects — running as a script or Space still launches the app.
if __name__ == "__main__":
    demo.launch(debug=True)