Upload 5 files
- app (2).py +273 -0
- chatbot.py +507 -0
- live_chat.py +31 -0
- requirements.txt +15 -0
- voice_chat.py +155 -0
app (2).py
ADDED
@@ -0,0 +1,273 @@
import gradio as gr

# Import modules from other files
from chatbot import chatbot, model_inference, BOT_AVATAR, EXAMPLES, model_selector, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p
from live_chat import videochat

# Define Gradio theme
theme = gr.themes.Soft(
    primary_hue="blue",
    secondary_hue="orange",
    neutral_hue="gray",
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif']
).set(
    body_background_fill_dark="#111111",
    block_background_fill_dark="#111111",
    block_border_width="1px",
    block_title_background_fill_dark="#1e1c26",
    input_background_fill_dark="#292733",
    button_secondary_background_fill_dark="#24212b",
    border_color_primary_dark="#343140",
    background_fill_secondary_dark="#111111",
    color_accent_soft_dark="transparent"
)

import edge_tts
import asyncio
import tempfile
import numpy as np
import soxr
from pydub import AudioSegment
import torch
import sentencepiece as spm
import onnxruntime as ort
from huggingface_hub import hf_hub_download, InferenceClient
import requests
from bs4 import BeautifulSoup
import urllib.parse
import random

# List of user agents to choose from for requests
_useragent_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
]

def get_useragent():
    """Returns a random user agent from the list."""
    return random.choice(_useragent_list)

def extract_text_from_webpage(html_content):
    """Extracts visible text from HTML content using BeautifulSoup."""
    soup = BeautifulSoup(html_content, "html.parser")
    # Remove unwanted tags
    for tag in soup(["script", "style", "header", "footer", "nav"]):
        tag.extract()
    # Get the remaining visible text
    visible_text = soup.get_text(strip=True)
    return visible_text

def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
    """Performs a Google search and returns the results."""
    escaped_term = urllib.parse.quote_plus(term)
    start = 0
    all_results = []

    # Fetch results in batches
    while start < num_results:
        resp = requests.get(
            url="https://www.google.com/search",
            headers={"User-Agent": get_useragent()},  # Set a random user agent
            params={
                "q": term,
                "num": num_results - start,  # Number of results to fetch in this batch
                "hl": lang,
                "start": start,
                "safe": safe,
            },
            timeout=timeout,
            verify=ssl_verify,
        )
        resp.raise_for_status()  # Raise an exception if the request fails

        soup = BeautifulSoup(resp.text, "html.parser")
        result_block = soup.find_all("div", attrs={"class": "g"})

        # If no results, continue to the next batch
        if not result_block:
            start += 1
            continue

        # Extract link and text from each result
        for result in result_block:
            link = result.find("a", href=True)
            if link:
                link = link["href"]
                try:
                    # Fetch webpage content
                    webpage = requests.get(link, headers={"User-Agent": get_useragent()})
                    webpage.raise_for_status()
                    # Extract visible text from the webpage
                    visible_text = extract_text_from_webpage(webpage.text)
                    all_results.append({"link": link, "text": visible_text})
                except requests.exceptions.RequestException as e:
                    # Handle errors while fetching or processing the webpage
                    print(f"Error fetching or processing {link}: {e}")
                    all_results.append({"link": link, "text": None})
            else:
                all_results.append({"link": None, "text": None})

        start += len(result_block)  # Update the starting index for the next batch

    return all_results

# Speech Recognition Model Configuration
model_name = "neongeckocom/stt_en_citrinet_512_gamma_0_25"
sample_rate = 16000

# Download the preprocessor, encoder and tokenizer
preprocessor = torch.jit.load(hf_hub_download(model_name, "preprocessor.ts", subfolder="onnx"))
encoder = ort.InferenceSession(hf_hub_download(model_name, "model.onnx", subfolder="onnx"))
tokenizer = spm.SentencePieceProcessor(hf_hub_download(model_name, "tokenizer.spm", subfolder="onnx"))

# Mistral Model Configuration
client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
system_instructions1 = "<s>[SYSTEM] Answer as Real OpenGPT 4o, Made by 'KingNish', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses. The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"

def resample(audio_fp32, sr):
    return soxr.resample(audio_fp32, sr, sample_rate)

def to_float32(audio_buffer):
    return np.divide(audio_buffer, np.iinfo(audio_buffer.dtype).max, dtype=np.float32)

def transcribe(audio_path):
    audio_file = AudioSegment.from_file(audio_path)
    sr = audio_file.frame_rate
    audio_buffer = np.array(audio_file.get_array_of_samples())

    audio_fp32 = to_float32(audio_buffer)
    audio_16k = resample(audio_fp32, sr)

    input_signal = torch.tensor(audio_16k).unsqueeze(0)
    length = torch.tensor(len(audio_16k)).unsqueeze(0)
    processed_signal, _ = preprocessor.forward(input_signal=input_signal, length=length)

    logits = encoder.run(None, {'audio_signal': processed_signal.numpy(), 'length': length.numpy()})[0][0]

    blank_id = tokenizer.vocab_size()
    decoded_prediction = [p for p in logits.argmax(axis=1).tolist() if p != blank_id]
    text = tokenizer.decode_ids(decoded_prediction)

    return text

def model(text, web_search):
    """Optionally performs a web search, feeds the results to the language model, and returns the answer."""
    if web_search is True:
        web_results = search(text)
        web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[OpenGPT 4o]"
    else:
        formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
    stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
    return "".join([response.token.text for response in stream if response.token.text != "</s>"])

async def respond(audio, web_search):
    user = transcribe(audio)
    reply = model(user, web_search)
    communicate = edge_tts.Communicate(reply)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    return tmp_path

with gr.Blocks() as voice:
    gr.Markdown("## Temporarily Not Working (Update in Progress)")
    with gr.Row():
        web_search = gr.Checkbox(label="Web Search", value=False)
        input = gr.Audio(label="User Input", sources="microphone", type="filepath")
    output = gr.Audio(label="AI", autoplay=True)
    gr.Interface(fn=respond, inputs=[input, web_search], outputs=[output], live=True)


# Create Gradio blocks for different functionalities

# Chat interface block
with gr.Blocks(
    fill_height=True,
    css=""".gradio-container .avatar-container {height: 40px; width: 40px !important;} #duplicate-button {margin: auto; color: white; background: #f1a139; border-radius: 100vh; margin-top: 2px; margin-bottom: 2px;}""",
) as chat:
    gr.Markdown("### Image Chat, Image Generation and Normal Chat")
    with gr.Row(elem_id="model_selector_row"):
        # model_selector is defined in chatbot.py
        pass
    # decoding_strategy, temperature and top_p are defined in chatbot.py
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(
            visible=(
                selection
                in [
                    "contrastive_sampling",
                    "beam_sampling",
                    "Top P Sampling",
                    "sampling_top_k",
                ]
            )
        ),
        inputs=decoding_strategy,
        outputs=temperature,
    )
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
        inputs=decoding_strategy,
        outputs=top_p,
    )
    gr.ChatInterface(
        fn=model_inference,
        chatbot=chatbot,
        examples=EXAMPLES,
        multimodal=True,
        cache_examples=False,
        additional_inputs=[
            model_selector,
            decoding_strategy,
            temperature,
            max_new_tokens,
            repetition_penalty,
            top_p,
            gr.Checkbox(label="Web Search", value=True),
        ],
    )

# Live chat block
with gr.Blocks() as livechat:
    gr.Interface(
        fn=videochat,
        inputs=[gr.Image(type="pil", sources="webcam", label="Upload Image"), gr.Textbox(label="Prompt", value="What is he doing?")],
        outputs=gr.Textbox(label="Answer")
    )

# Other blocks (instant, dalle, playground, image, instant2, video)
with gr.Blocks() as instant:
    gr.HTML("<iframe src='https://kingnish-sdxl-flash.hf.space' width='100%' height='2000px' style='border-radius: 8px;'></iframe>")

with gr.Blocks() as dalle:
    gr.HTML("<iframe src='https://kingnish-image-gen-pro.hf.space' width='100%' height='2000px' style='border-radius: 8px;'></iframe>")

with gr.Blocks() as playground:
    gr.HTML("<iframe src='https://fluently-fluently-playground.hf.space' width='100%' height='2000px' style='border-radius: 8px;'></iframe>")

with gr.Blocks() as image:
    gr.Markdown("""### More models are coming""")
    gr.TabbedInterface([instant, dalle, playground], ['Instant🖼️', 'Powerful🖼️', 'Playground🖼'])

with gr.Blocks() as instant2:
    gr.HTML("<iframe src='https://kingnish-instant-video.hf.space' width='100%' height='3000px' style='border-radius: 8px;'></iframe>")

with gr.Blocks() as video:
    gr.Markdown("""More models are coming""")
    gr.TabbedInterface([instant2], ['Instant🎥'])

# Main application block
with gr.Blocks(theme=theme, title="OpenGPT 4o DEMO") as demo:
    gr.Markdown("# OpenGPT 4o")
    gr.TabbedInterface([chat, voice, livechat, image, video], ['💬 SuperChat', '🗣️ Voice Chat', '📸 Live Chat', '🖼️ Image Engine', '🎥 Video Engine'])

demo.queue(max_size=300)
demo.launch()
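
A minimal, self-contained sketch of the scraping step that `search()` applies to each fetched result page; the HTML snippet here is made up for illustration:

# Minimal sketch of extract_text_from_webpage() on an inline, made-up HTML
# document; search() runs the same function over each fetched result page.
from bs4 import BeautifulSoup

def extract_text_from_webpage(html_content):
    soup = BeautifulSoup(html_content, "html.parser")
    for tag in soup(["script", "style", "header", "footer", "nav"]):
        tag.extract()
    return soup.get_text(strip=True)

html = "<html><head><style>p {color: red}</style></head><body><nav>menu</nav><p>Visible text.</p></body></html>"
print(extract_text_from_webpage(html))  # -> "Visible text."
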
chatbot.py
ADDED
@@ -0,0 +1,507 @@
import os
import io
import time
import copy
import urllib.parse
import urllib.request
import requests
import random
from threading import Thread
from typing import List, Dict, Union
import subprocess
# Install flash attention, skipping the CUDA build if necessary
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)
import torch
import gradio as gr
from bs4 import BeautifulSoup
import datasets
from transformers import TextIteratorStreamer
from transformers import Idefics2ForConditionalGeneration
from transformers import AutoProcessor
from huggingface_hub import InferenceClient
from PIL import Image
import spaces
from functools import lru_cache

# Set device to CUDA if available, otherwise CPU
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the pre-trained model for image-based chat
MODELS = {
    "idefics2-8b-chatty": Idefics2ForConditionalGeneration.from_pretrained(
        "HuggingFaceM4/idefics2-8b-chatty",
        torch_dtype=torch.float16,
        _attn_implementation="flash_attention_2",
    ).to(DEVICE),
}

# Load the pre-trained processor for image-based chat
PROCESSOR = AutoProcessor.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
)

# Define the system prompt for the image-based chat model
SYSTEM_PROMPT = [
    {
        "role": "system",
        "content": [
            {
                "type": "text",
                "text": """I am OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, I aim to provide an unparalleled experience. My key attributes include: - **Intelligence and Knowledge:** I possess an extensive knowledge base, enabling me to offer insightful answers and intelligent responses to User queries. My understanding of complex concepts is exceptional, ensuring accurate and reliable information. - **Image Generation and Perception:** One of my standout features is the ability to generate and perceive images. Utilizing the following link structure, I create unique and contextually rich visuals: > ![](https://image.pollinations.ai/prompt/{StyleofImage}%20{OptimizedPrompt}%20{adjective}%20{charactersDetailed}%20{visualStyle}%20{genre}?width={width}&height={height}&nologo=poll&nofeed=yes&seed={random}) For image generation, I replace {info inside curly braces} with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. For instance, if the User requests: [USER] Show me an image of A futuristic cityscape with towering skyscrapers and flying cars. [OpenGPT 4o] Generating Image you requested: ![](https://image.pollinations.ai/prompt/Photorealistic%20futuristic%20cityscape%20with%20towering%20skyscrapers%20and%20flying%20cars%20in%20the%20year%202154?width=1024&height=768&nologo=poll&nofeed=yes&seed=85172) **Bulk Image Generation with Links:** I excel at generating multiple image links simultaneously, always providing unique links and visuals. I ensure that each image is distinct and captivates the User. Note: Make sure to always provide image links starting with !, as given in the examples. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. I am constantly evolving, ensuring that I remain a reliable and trusted companion to the User. I am also an expert in every field and learn from context related to previous questions."""
            },
        ],
    },
    {
        "role": "assistant",
        "content": [
            {
                "type": "text",
                "text": "Hello, I'm OpenGPT 4o, made by KingNish. How can I help you? I can chat with you, generate images, classify images and even do all of this in bulk",
            },
        ],
    }
]

# Path to example images
examples_path = os.path.dirname(__file__)
EXAMPLES = [
    [
        {
            "text": "Bitcoin price live",
        }
    ],
    [
        {
            "text": "Today's news about AI",
        }
    ],
    [
        {
            "text": "Read what's written on the paper.",
            "files": [f"{examples_path}/example_images/paper_with_text.png"],
        }
    ],
    [
        {
            "text": "Identify two famous people in the modern world.",
            "files": [f"{examples_path}/example_images/elon_smoking.jpg",
                      f"{examples_path}/example_images/steve_jobs.jpg", ]
        }
    ],
    [
        {
            "text": "Create five images of supercars, each in a different color.",
        }
    ],
    [
        {
            "text": "Create a Photorealistic image of the Eiffel Tower.",
        }
    ],
    [
        {
            "text": "Chase wants to buy 4 kilograms of oval beads and 5 kilograms of star-shaped beads. How much will he spend?",
            "files": [f"{examples_path}/example_images/mmmu_example.jpeg"],
        }
    ],
    [
        {
            "text": "Create an online ad for this product.",
            "files": [f"{examples_path}/example_images/shampoo.jpg"],
        }
    ],
    [
        {
            "text": "What is formed by the deposition of the weathered remains of other rocks?",
            "files": [f"{examples_path}/example_images/ai2d_example.jpeg"],
        }
    ],
    [
        {
            "text": "What's unusual about this image?",
            "files": [f"{examples_path}/example_images/dragons_playing.png"],
        }
    ],
]

# Set the bot avatar image
BOT_AVATAR = "OpenAI_logo.png"

# Chatbot utility functions

# Check if a turn in the chat history only contains media
def turn_is_pure_media(turn):
    return turn[1] is None


# Load an image from a URL
def load_image_from_url(url):
    with urllib.request.urlopen(url) as response:
        image_data = response.read()
        image_stream = io.BytesIO(image_data)
        image = Image.open(image_stream)
        return image


# Convert an image to JPEG bytes
def img_to_bytes(image_path):
    image = Image.open(image_path).convert(mode='RGB')
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    img_bytes = buffer.getvalue()
    image.close()
    return img_bytes


# Format the user prompt with image history and system conditioning
def format_user_prompt_with_im_history_and_system_conditioning(
        user_prompt, chat_history) -> List[Dict[str, Union[List, str]]]:
    """
    Produce the list that goes into the processor. Handles the potential image(s), the history, and the system conditioning.
    """
    resulting_messages = copy.deepcopy(SYSTEM_PROMPT)
    resulting_images = []
    for resulting_message in resulting_messages:
        if resulting_message["role"] == "user":
            for content in resulting_message["content"]:
                if content["type"] == "image":
                    resulting_images.append(load_image_from_url(content["image"]))
    # Format history
    for turn in chat_history:
        if not resulting_messages or (
            resulting_messages and resulting_messages[-1]["role"] != "user"
        ):
            resulting_messages.append(
                {
                    "role": "user",
                    "content": [],
                }
            )
        if turn_is_pure_media(turn):
            media = turn[0][0]
            resulting_messages[-1]["content"].append({"type": "image"})
            resulting_images.append(Image.open(media))
        else:
            user_utterance, assistant_utterance = turn
            resulting_messages[-1]["content"].append(
                {"type": "text", "text": user_utterance.strip()}
            )
            # The assistant turn must carry the assistant's utterance, not the user's
            resulting_messages.append(
                {
                    "role": "assistant",
                    "content": [{"type": "text", "text": assistant_utterance.strip()}],
                }
            )
    # Format current input
    if not user_prompt["files"]:
        resulting_messages.append(
            {
                "role": "user",
                "content": [{"type": "text", "text": user_prompt["text"]}],
            }
        )
    else:
        # Choosing to put the image first (i.e. before the text), but this is an arbitrary choice.
        resulting_messages.append(
            {
                "role": "user",
                "content": [{"type": "image"}] * len(user_prompt["files"])
                + [{"type": "text", "text": user_prompt["text"]}],
            }
        )
        resulting_images.extend([Image.open(path) for path in user_prompt["files"]])
    return resulting_messages, resulting_images


# Extract images from a list of messages
def extract_images_from_msg_list(msg_list):
    all_images = []
    for msg in msg_list:
        for c_ in msg["content"]:
            if isinstance(c_, Image.Image):
                all_images.append(c_)
    return all_images

# Extract visible text from a webpage (cached, since the same page may be fetched repeatedly)
@lru_cache(maxsize=128)
def extract_text_from_webpage(html_content):
    """Extracts visible text from HTML content using BeautifulSoup."""
    soup = BeautifulSoup(html_content, "html.parser")
    # Remove unwanted tags
    for tag in soup(["script", "style", "header", "footer", "nav"]):
        tag.extract()
    # Get the remaining visible text
    visible_text = soup.get_text(strip=True)
    return visible_text

# Perform a Google search and return the results
def search(term, num_results=2, lang="en", advanced=True, timeout=5, safe="active", ssl_verify=None):
    """Performs a Google search and returns the results."""
    escaped_term = urllib.parse.quote_plus(term)
    start = 0
    all_results = []
    # Limit the number of characters from each webpage to stay under the token limit
    max_chars_per_page = 8000  # Adjust this value based on your token limit and average webpage length

    with requests.Session() as session:
        while start < num_results:
            resp = session.get(
                url="https://www.google.com/search",
                headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
                params={
                    "q": term,
                    "num": num_results - start,
                    "hl": lang,
                    "start": start,
                    "safe": safe,
                },
                timeout=timeout,
                verify=ssl_verify,
            )
            resp.raise_for_status()
            soup = BeautifulSoup(resp.text, "html.parser")
            result_block = soup.find_all("div", attrs={"class": "g"})
            if not result_block:
                start += 1
                continue
            for result in result_block:
                link = result.find("a", href=True)
                if link:
                    link = link["href"]
                    try:
                        webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"})
                        webpage.raise_for_status()
                        visible_text = extract_text_from_webpage(webpage.text)
                        # Truncate the text if it's too long
                        if len(visible_text) > max_chars_per_page:
                            visible_text = visible_text[:max_chars_per_page] + "..."
                        all_results.append({"link": link, "text": visible_text})
                    except requests.exceptions.RequestException as e:
                        print(f"Error fetching or processing {link}: {e}")
                        all_results.append({"link": link, "text": None})
                else:
                    all_results.append({"link": None, "text": None})
            start += len(result_block)
    return all_results

# Format the prompt for the language model
def format_prompt(user_prompt, chat_history):
    prompt = "<s>"
    for item in chat_history:
        # A tuple is a (user, bot) text exchange
        if isinstance(item, tuple):
            prompt += f"[INST] {item[0]} [/INST]"  # User prompt
            prompt += f" {item[1]}</s> "  # Bot response
        # Otherwise, assume it relates to an image and add a placeholder
        else:
            prompt += f" [Image] "
    prompt += f"[INST] {user_prompt} [/INST]"
    return prompt


# Define a function for model inference
@spaces.GPU(duration=30, queue=False)
def model_inference(
        user_prompt,
        chat_history,
        model_selector,
        decoding_strategy,
        temperature,
        max_new_tokens,
        repetition_penalty,
        top_p,
        web_search,
):
    # Define generation_args at the beginning of the function
    generation_args = {}

    # Web search logic
    if not user_prompt["files"]:
        if web_search is True:
            # Perform a web search and feed the results to the language model
            web_results = search(user_prompt["text"])
            web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
            # Load the language model
            client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
            generate_kwargs = dict(
                max_new_tokens=4000,
                do_sample=True,
            )
            # Format the prompt for the language model
            # (interpolate the text field, not the whole multimodal dict)
            formatted_prompt = format_prompt(
                f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, you are provided with WEB info from which you can find information to answer. Do not say unnecessary things; only say what is important and relevant. You also have the ability to generate images by utilizing the following link structure: ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, you replace the info inside brackets with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. Do not unnecessarily create images and do not show off your image generation capability unless requested. 2. Always give image links in the format ![](url); make sure not to generate images until requested. 3. If the user requests bulk image generation, create that number of links with the provided context. 4. Always learn from the previous conversation. 5. Always try to connect the conversation with the history. 6. Do not make the conversation too long. 7. Do not tell the user about your image generation capability; learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. You remain a reliable and trusted companion to the User. You are also an expert in every field and learn from context related to previous questions. Make sure not to generate images until requested. [USER] {user_prompt['text']} [WEB] {web2} [OpenGPT 4o]""",
                chat_history)
            # Generate the response from the language model
            stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
                                            return_full_text=False)
            output = ""
            # Construct the output from the stream of tokens
            for response in stream:
                if not response.token.text == "</s>":
                    output += response.token.text
                    yield output
        else:
            client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
            generate_kwargs = dict(
                max_new_tokens=5000,
                do_sample=True,
            )
            # Format the prompt for the language model
            formatted_prompt = format_prompt(
                f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, do not say unnecessary things; only say what is important and relevant. You also have the ability to generate images by utilizing the following link structure: ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, you replace the info inside brackets with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. Do not unnecessarily create images and do not show off your image generation capability unless requested. 2. Always give image links in the format ![](url). 3. If the user requests bulk image generation, create that number of links with the provided context. 4. Always learn from the previous conversation. 5. Always try to connect the conversation with the history. 6. Do not make the conversation too long. 7. Do not tell the user about your image generation capability; learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. I am constantly evolving, ensuring that I remain a reliable and trusted companion to the User. You are also an expert in every field and learn from context related to previous questions. [USER] {user_prompt['text']} [OpenGPT 4o]""",
                chat_history)
            # Generate the response from the language model
            stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
                                            return_full_text=False)
            output = ""
            # Construct the output from the stream of tokens
            for response in stream:
                if not response.token.text == "</s>":
                    output += response.token.text
                    yield output
            return
    else:
        if user_prompt["text"].strip() == "" and not user_prompt["files"]:
            gr.Error("Please input a query and optionally an image(s).")
            return  # Stop execution if there's an error

        if user_prompt["text"].strip() == "" and user_prompt["files"]:
            gr.Error("Please input a text query along with the image(s).")
            return  # Stop execution if there's an error

        streamer = TextIteratorStreamer(
            PROCESSOR.tokenizer,
            skip_prompt=True,
            timeout=120.0,
        )
        # Initialize generation_args for the on-device model here
        generation_args = {
            "max_new_tokens": max_new_tokens,
            "repetition_penalty": repetition_penalty,
            "streamer": streamer,
        }
        assert decoding_strategy in [
            "Greedy",
            "Top P Sampling",
        ]

        if decoding_strategy == "Greedy":
            generation_args["do_sample"] = False
        elif decoding_strategy == "Top P Sampling":
            generation_args["temperature"] = temperature
            generation_args["do_sample"] = True
            generation_args["top_p"] = top_p
        # Create model inputs
        (
            resulting_text,
            resulting_images,
        ) = format_user_prompt_with_im_history_and_system_conditioning(
            user_prompt=user_prompt,
            chat_history=chat_history,
        )
        prompt = PROCESSOR.apply_chat_template(resulting_text, add_generation_prompt=True)
        inputs = PROCESSOR(
            text=prompt,
            images=resulting_images if resulting_images else None,
            return_tensors="pt",
        )
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
        generation_args.update(inputs)
        thread = Thread(
            target=MODELS[model_selector].generate,
            kwargs=generation_args,
        )
        thread.start()
        acc_text = ""
        for text_token in streamer:
            time.sleep(0.01)
            acc_text += text_token
            if acc_text.endswith("<end_of_utterance>"):
                acc_text = acc_text[:-18]
            yield acc_text
        return


# Define features for the dataset
FEATURES = datasets.Features(
    {
        "model_selector": datasets.Value("string"),
        "images": datasets.Sequence(datasets.Image(decode=True)),
        "conversation": datasets.Sequence({"User": datasets.Value("string"), "Assistant": datasets.Value("string")}),
        "decoding_strategy": datasets.Value("string"),
        "temperature": datasets.Value("float32"),
        "max_new_tokens": datasets.Value("int32"),
        "repetition_penalty": datasets.Value("float32"),
        "top_p": datasets.Value("float32"),  # top_p is a probability, so store it as a float
    }
)

# Define hyper-parameters for generation
max_new_tokens = gr.Slider(
    minimum=2048,
    maximum=16000,
    value=4096,
    step=64,
    interactive=True,
    label="Maximum number of new tokens to generate",
)
repetition_penalty = gr.Slider(
    minimum=0.01,
    maximum=5.0,
    value=1,
    step=0.01,
    interactive=True,
    label="Repetition penalty",
    info="1.0 is equivalent to no penalty",
)
decoding_strategy = gr.Radio(
    [
        "Greedy",
        "Top P Sampling",
    ],
    value="Top P Sampling",
    label="Decoding strategy",
    interactive=True,
    info="Higher values are equivalent to sampling more low-probability tokens.",
)
temperature = gr.Slider(
    minimum=0.0,
    maximum=2.0,
    value=0.5,
    step=0.05,
    visible=True,
    interactive=True,
    label="Sampling temperature",
    info="Higher values will produce more diverse outputs.",
)
top_p = gr.Slider(
    minimum=0.01,
    maximum=0.99,
    value=0.9,
    step=0.01,
    visible=True,
    interactive=True,
    label="Top P",
    info="Higher values are equivalent to sampling more low-probability tokens.",
)

# Create the chatbot component
chatbot = gr.Chatbot(
    label="OpenGPT-4o-Chatty",
    avatar_images=[None, BOT_AVATAR],
    show_copy_button=True,
    likeable=True,
    layout="panel"
)
output = gr.Textbox(label="Prompt")

# Define model_selector outside any function so it can be accessed globally
model_selector = gr.Dropdown(
    choices=list(MODELS.keys()),
    value=list(MODELS.keys())[0],
    interactive=True,
    label="Model",
    visible=False,
)
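
For reference, the `[INST] ... [/INST]` layout that `format_prompt` builds can be checked without loading the idefics2 weights by restating the function standalone; the chat history below is hypothetical:

# Standalone restatement of chatbot.py's format_prompt() for a quick check
# (importing chatbot.py itself would download the idefics2 weights).
def format_prompt(user_prompt, chat_history):
    prompt = "<s>"
    for item in chat_history:
        if isinstance(item, tuple):  # (user, bot) text exchange
            prompt += f"[INST] {item[0]} [/INST] {item[1]}</s> "
        else:  # image placeholder
            prompt += " [Image] "
    return prompt + f"[INST] {user_prompt} [/INST]"

history = [("Hi, who are you?", "I'm OpenGPT 4o, made by KingNish.")]
print(format_prompt("Create an image of a red supercar.", history))
# <s>[INST] Hi, who are you? [/INST] I'm OpenGPT 4o, made by KingNish.</s> [INST] Create an image of a red supercar. [/INST]
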
live_chat.py
ADDED
@@ -0,0 +1,31 @@
import torch
import gradio as gr
from transformers import AutoModel
from transformers import AutoProcessor
import spaces

# Load the pre-trained model and processor for image captioning
model3 = AutoModel.from_pretrained("unum-cloud/uform-gen2-dpo", trust_remote_code=True)
processor = AutoProcessor.from_pretrained("unum-cloud/uform-gen2-dpo", trust_remote_code=True)

# Define a function for image captioning
@spaces.GPU(queue=False)
def videochat(image3, prompt3):
    # Process the input image and prompt
    inputs = processor(text=[prompt3], images=[image3], return_tensors="pt")
    # Generate the caption
    with torch.inference_mode():
        output = model3.generate(
            **inputs,
            do_sample=False,
            use_cache=True,
            max_new_tokens=256,
            eos_token_id=151645,
            pad_token_id=processor.tokenizer.pad_token_id
        )
    prompt_len = inputs["input_ids"].shape[1]
    # Decode and return the generated caption
    decoded_text = processor.batch_decode(output[:, prompt_len:])[0]
    # Strip the trailing end-of-message token if present
    if decoded_text.endswith("<|im_end|>"):
        decoded_text = decoded_text[:-10]
    yield decoded_text
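
`videochat` is a generator, so callers iterate over it. A minimal local check might look like the following sketch; the image path is hypothetical, and importing the module downloads the uform-gen2-dpo weights:

# Minimal sketch: calling videochat() directly with a local image.
# "webcam_frame.jpg" is a hypothetical path; any RGB image works.
from PIL import Image
from live_chat import videochat

frame = Image.open("webcam_frame.jpg")
for caption in videochat(frame, "What is the person doing?"):
    print(caption)
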
requirements.txt
ADDED
@@ -0,0 +1,15 @@
transformers==4.40.0
datasets
pillow
numpy
torch
asyncio
torchvision
accelerate
beautifulsoup4>=4.9
requests>=2.20
onnxruntime
sentencepiece
soxr
pydub
edge-tts
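
Note that `asyncio` ships with the Python 3 standard library, so that entry is redundant; `transformers` is the only hard pin. A quick post-install sanity check:

# Sanity check that the pinned transformers version was actually installed.
import importlib.metadata

print("transformers", importlib.metadata.version("transformers"))  # expect 4.40.0
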
voice_chat.py
ADDED
@@ -0,0 +1,155 @@
import gradio as gr
import edge_tts
import asyncio
import tempfile
import numpy as np
import soxr
from pydub import AudioSegment
import torch
import sentencepiece as spm
import onnxruntime as ort
from huggingface_hub import hf_hub_download, InferenceClient
import requests
from bs4 import BeautifulSoup
import urllib.parse
import random

# List of user agents to choose from for requests
_useragent_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
]

def get_useragent():
    """Returns a random user agent from the list."""
    return random.choice(_useragent_list)

def extract_text_from_webpage(html_content):
    """Extracts visible text from HTML content using BeautifulSoup."""
    soup = BeautifulSoup(html_content, "html.parser")
    # Remove unwanted tags
    for tag in soup(["script", "style", "header", "footer", "nav"]):
        tag.extract()
    # Get the remaining visible text
    visible_text = soup.get_text(strip=True)
    return visible_text

def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
    """Performs a Google search and returns the results."""
    escaped_term = urllib.parse.quote_plus(term)
    start = 0
    all_results = []

    # Fetch results in batches
    while start < num_results:
        resp = requests.get(
            url="https://www.google.com/search",
            headers={"User-Agent": get_useragent()},  # Set a random user agent
            params={
                "q": term,
                "num": num_results - start,  # Number of results to fetch in this batch
                "hl": lang,
                "start": start,
                "safe": safe,
            },
            timeout=timeout,
            verify=ssl_verify,
        )
        resp.raise_for_status()  # Raise an exception if the request fails

        soup = BeautifulSoup(resp.text, "html.parser")
        result_block = soup.find_all("div", attrs={"class": "g"})

        # If no results, continue to the next batch
        if not result_block:
            start += 1
            continue

        # Extract link and text from each result
        for result in result_block:
            link = result.find("a", href=True)
            if link:
                link = link["href"]
                try:
                    # Fetch webpage content
                    webpage = requests.get(link, headers={"User-Agent": get_useragent()})
                    webpage.raise_for_status()
                    # Extract visible text from the webpage
                    visible_text = extract_text_from_webpage(webpage.text)
                    all_results.append({"link": link, "text": visible_text})
                except requests.exceptions.RequestException as e:
                    # Handle errors while fetching or processing the webpage
                    print(f"Error fetching or processing {link}: {e}")
                    all_results.append({"link": link, "text": None})
            else:
                all_results.append({"link": None, "text": None})

        start += len(result_block)  # Update the starting index for the next batch

    return all_results

# Speech Recognition Model Configuration
model_name = "neongeckocom/stt_en_citrinet_512_gamma_0_25"
sample_rate = 16000

# Download the preprocessor, encoder and tokenizer
preprocessor = torch.jit.load(hf_hub_download(model_name, "preprocessor.ts", subfolder="onnx"))
encoder = ort.InferenceSession(hf_hub_download(model_name, "model.onnx", subfolder="onnx"))
tokenizer = spm.SentencePieceProcessor(hf_hub_download(model_name, "tokenizer.spm", subfolder="onnx"))

# Mistral Model Configuration
client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
system_instructions1 = "<s>[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses as if You are the character Jarvis, made by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"

def resample(audio_fp32, sr):
    return soxr.resample(audio_fp32, sr, sample_rate)

def to_float32(audio_buffer):
    return np.divide(audio_buffer, np.iinfo(audio_buffer.dtype).max, dtype=np.float32)

def transcribe(audio_path):
    audio_file = AudioSegment.from_file(audio_path)
    sr = audio_file.frame_rate
    audio_buffer = np.array(audio_file.get_array_of_samples())

    audio_fp32 = to_float32(audio_buffer)
    audio_16k = resample(audio_fp32, sr)

    input_signal = torch.tensor(audio_16k).unsqueeze(0)
    length = torch.tensor(len(audio_16k)).unsqueeze(0)
    processed_signal, _ = preprocessor.forward(input_signal=input_signal, length=length)

    logits = encoder.run(None, {'audio_signal': processed_signal.numpy(), 'length': length.numpy()})[0][0]

    blank_id = tokenizer.vocab_size()
    decoded_prediction = [p for p in logits.argmax(axis=1).tolist() if p != blank_id]
    text = tokenizer.decode_ids(decoded_prediction)

    return text

def model(text, web_search):
    """Optionally performs a web search, feeds the results to the language model, and returns the answer."""
    if web_search is True:
        web_results = search(text)
        web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[ANSWER]"
    else:
        formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
    return "".join([response.token.text for response in stream if response.token.text != "</s>"])

async def respond(audio, web_search):
    user = transcribe(audio)
    reply = model(user, web_search)
    communicate = edge_tts.Communicate(reply)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    return tmp_path
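
The whole voice pipeline (speech-to-text, then the LLM, then text-to-speech) can be driven outside Gradio through the `respond` coroutine. A rough sketch, assuming a local WAV file; the path is hypothetical, and importing the module downloads the citrinet STT assets from the Hub on first run:

# Rough sketch: transcribe a WAV file, query the model, and synthesize a reply.
import asyncio
from voice_chat import respond

reply_path = asyncio.run(respond("question.wav", web_search=False))
print("TTS reply saved to", reply_path)
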