Commit · 1eaecc8
1 Parent(s): eafc43e
add
- .gitattributes +36 -0
- README.md +12 -0
- app.log +1 -0
- app.py +416 -0
- constants.py +27 -0
- conversation.py +460 -0
- examples/0.jpg +0 -0
- examples/1.jpg +0 -0
- examples/2.jpg +0 -0
- gradio_web_server.log +905 -0
- mmproj-model-f16.gguf +3 -0
- requirements.txt +165 -0
- test.py +479 -0
- utils.py +126 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
mmproj-model-f16.gguf filter=lfs diff=lfs merge=lfs -text
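These are the standard Hugging Face Spaces rules routing binary artifacts through Git LFS; the final entry additionally pins the downloaded mmproj-model-f16.gguf projector weights to LFS.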
README.md
ADDED
@@ -0,0 +1,12 @@
---
title: Gdio
emoji: 🐢
colorFrom: indigo
colorTo: blue
sdk: gradio
sdk_version: 4.25.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.log
ADDED
@@ -0,0 +1 @@
2024-04-05 10:21:51 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\Scripts\python.exe: Error while finding module specification for 'app.py' (ModuleNotFoundError: __path__ attribute not found on 'app' while trying to find 'app.py'). Try using 'app' instead of 'app.py' as the module name.
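This error comes from launching the app as `python -m app.py`: the `-m` flag expects a module name, not a file name, so the interpreter fails while resolving `app.py`. Running `python -m app` or simply `python app.py` avoids it, as the message itself suggests.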
app.py
ADDED
@@ -0,0 +1,416 @@
import argparse
import datetime
import json
import os
import time
import random
import gradio as gr
import requests
import base64
from io import BytesIO
import re
from PIL import Image, ImageDraw
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava15ChatHandler

from conversation import (default_conversation, conv_templates,
                          SeparatorStyle)
from constants import LOGDIR
from utils import (build_logger, server_error_msg,
                   violates_moderation, moderation_msg)
import hashlib
import urllib.request

# Download the CLIP projector weights, then load the ShareGPT4V model with a
# LLaVA-1.5-style chat handler so the model can consume images.
urllib.request.urlretrieve("https://huggingface.co/Galunid/ShareGPT4V-gguf/resolve/main/mmproj-model-f16.gguf?download=true", "./mmproj-model-f16.gguf")
chat_handler = Llava15ChatHandler(clip_model_path="./mmproj-model-f16.gguf")

llm = Llama.from_pretrained(
    repo_id="Galunid/ShareGPT4V-gguf",
    filename="ShareGPT4V-f16.gguf",
    chat_handler=chat_handler,
    verbose=False,
    n_ctx=2048,  # n_ctx should be increased to accommodate the image embedding
    logits_all=True,  # needed to make llava work
)

logger = build_logger("gradio_web_server", "gradio_web_server.log")

headers = {"User-Agent": "Wafer Defect Detection with LLM Classification and Analyze Client"}

no_change_btn = gr.Button()
enable_btn = gr.Button(interactive=True)
disable_btn = gr.Button(interactive=False)

priority = {
    "vicuna-13b": "aaaaaaa",
    "koala-13b": "aaaaaab",
}


def get_conv_log_filename():
    t = datetime.datetime.now()
    name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
    return name


get_window_url_params = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log(url_params);
    return url_params;
}
"""


def load_demo(url_params, request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")

    default_models = []
    dropdown_update = gr.Dropdown(
        choices=default_models,
        value=default_models[0] if len(default_models) > 0 else ""
    )

    state = default_conversation.copy()
    return state, dropdown_update


def load_demo_refresh_model_list(request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}")
    state = default_conversation.copy()

    default_models = []
    dropdown_update = gr.Dropdown(
        choices=default_models,
        value=default_models[0] if len(default_models) > 0 else ""
    )
    return state, dropdown_update


def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "tstamp": round(time.time(), 4),
            "type": vote_type,
            "model": model_selector,
            "state": state.dict(),
            "ip": request.client.host,
        }
        fout.write(json.dumps(data) + "\n")


def upvote_last_response(state, model_selector, request: gr.Request):
    logger.info(f"upvote. ip: {request.client.host}")
    vote_last_response(state, "upvote", model_selector, request)
    return ("",) + (disable_btn,) * 3


def downvote_last_response(state, model_selector, request: gr.Request):
    logger.info(f"downvote. ip: {request.client.host}")
    vote_last_response(state, "downvote", model_selector, request)
    return ("",) + (disable_btn,) * 3


def flag_last_response(state, model_selector, request: gr.Request):
    logger.info(f"flag. ip: {request.client.host}")
    vote_last_response(state, "flag", model_selector, request)
    return ("",) + (disable_btn,) * 3


def regenerate(state, image_process_mode, request: gr.Request):
    logger.info(f"regenerate. ip: {request.client.host}")
    if len(state.messages) > 0:
        state.messages[-1][-1] = None
        prev_human_msg = state.messages[-2]
        if type(prev_human_msg[1]) in (tuple, list):
            prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
        state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def clear_history(request: gr.Request):
    logger.info(f"clear_history. ip: {request.client.host}")
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def add_text(state, text, image, image_process_mode, request: gr.Request):
    logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
    if len(text) <= 0 and image is None:
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
    if args.moderate:
        flagged = violates_moderation(text)
        if flagged:
            state.skip_next = True
            return (state, state.to_gradio_chatbot(), moderation_msg, None) + (
                no_change_btn,) * 5

    text = text[:1536]  # Hard cut-off
    if image is not None:
        text = text[:1200]  # Hard cut-off for images
        if '<image>' not in text:
            # text = '<Image><image></Image>' + text
            text = text + '\n<image>'
        text = (text, image, image_process_mode)
        if len(state.get_images(return_pil=True)) > 0:
            state = default_conversation.copy()
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def http_bot(state, model_selector, request: gr.Request):
    logger.info(f"http_bot. ip: {request.client.host}")
    start_tstamp = time.time()
    model_name = model_selector
    output = ""
    image_base64 = ""

    prompt = state.get_prompt()

    try:
        # Encode the last uploaded image as a base64 data URI for the chat handler.
        all_images = state.get_images(return_pil=True)
        output_image = None
        for image in all_images:
            output_image = image.copy()
            buffered = BytesIO()
            image.save(buffered, format="JPEG")
            image_base64 = f"data:image/jpeg;base64,{base64.b64encode(buffered.getvalue()).decode()}"

        output = llm.create_chat_completion(
            max_tokens=1024,
            messages=[
                {"role": "system", "content": "You are an assistant who perfectly answers all user requests."},
                {
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": image_base64}},
                        {"type": "text", "text": f"""{prompt}"""}
                    ]
                }
            ]
        )

        output = output["choices"][0]["message"]["content"]
        print(output)
        # Collect decimal literals from the reply and treat each group of four
        # as normalized box coordinates to draw on a copy of the input image.
        bboxes = re.findall(r"\d+\.\d+", output)
        print(bboxes)
        print(output, state.messages[-1][-1])
        for i in range(0, len(bboxes), 4):
            width, height = output_image.size
            img1 = ImageDraw.Draw(output_image)
            img1.rectangle([(float(bboxes[i]) * width, float(bboxes[i + 1]) * height), (float(bboxes[i + 2]) * width, float(bboxes[i + 3]) * height)], fill="#ffff33", outline="red")

        text = output
        if '<image>' not in text:
            # text = '<Image><image></Image>' + text
            text = text + '\n<image>'
        output = (text, output_image, "Default")

        print(output, state.messages[-1][-1])
        state.append_message(state.roles[1], output)
        # state.messages[-1][-1] = output

    except Exception as e:
        logger.error(f"{e}")
        state.messages[-1][-1] = server_error_msg
        yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    # if output != "":
    #     if type(state.messages[-1][-1]) is not tuple:
    #         state.messages[-1][-1] = state.messages[-1][-1][:-1]

    # finish_tstamp = time.time()
    # logger.info(f"{output}")

    yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5


title_markdown = ("""
# BLIP
""")


block_css = """

#buttons button {
    min-width: min(120px,100%);
}

"""


def build_demo(embed_mode, cur_dir=None):
    textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
    with gr.Blocks(title="BLIP", theme=gr.themes.Default(), css=block_css) as demo:
        state = gr.State()

        if not embed_mode:
            gr.Markdown(title_markdown)

        models = ["Propose Solution", "Baseline 1", "Baseline 2", "Baseline 3"]

        with gr.Row():
            with gr.Column(scale=3):
                with gr.Row(elem_id="model_selector_row"):
                    model_selector = gr.Dropdown(
                        choices=models,
                        value=models[0] if len(models) > 0 else "",
                        interactive=True,
                        show_label=False,
                        container=False,
                        visible=False)

                imagebox = gr.Image(type="pil")
                image_process_mode = gr.Radio(
                    ["Crop", "Resize", "Pad", "Default"],
                    value="Default",
                    label="Preprocess for non-square image", visible=False)

                if cur_dir is None:
                    cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(examples=[
                    [f"{cur_dir}/examples/0.jpg", "What are the violence acts and give me the coordinates (x,y,w,h) to draw bounding box in the image?"],
                    [f"{cur_dir}/examples/1.jpg", "What are the violence acts and give me the coordinates (x,y,w,h) to draw bounding box in the image?"],
                    [f"{cur_dir}/examples/2.jpg", "What are the violence acts and give me the coordinates (x,y,w,h) to draw bounding box in the image?"],
                    # [f"{cur_dir}/examples/0.png", "Wafer Defect Type: No-Defect"],
                ], inputs=[imagebox, textbox])

            with gr.Column(scale=7):
                chatbot = gr.Chatbot(
                    elem_id="chatbot",
                    label="BLIP",
                    height=940,
                    layout="panel",
                )
                with gr.Row():
                    with gr.Column(scale=7):
                        textbox.render()
                    with gr.Column(scale=1, min_width=50):
                        submit_btn = gr.Button(value="Send", variant="primary")
                with gr.Row(elem_id="buttons") as button_row:
                    upvote_btn = gr.Button(value="👍 Upvote")
                    downvote_btn = gr.Button(value="👎 Downvote")
                    flag_btn = gr.Button(value="⚠️ Flag")
                    regenerate_btn = gr.Button(value="🔄 Regenerate")
                    clear_btn = gr.Button(value="🗑️ Clear")

        url_params = gr.JSON(visible=False)

        # Register listeners
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        upvote_btn.click(
            upvote_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn],
            queue=False
        )
        downvote_btn.click(
            downvote_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn],
            queue=False
        )
        flag_btn.click(
            flag_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn],
            queue=False
        )

        regenerate_btn.click(
            regenerate,
            [state, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        ).then(
            http_bot,
            # [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, model_selector],
            [state, chatbot] + btn_list,
            # concurrency_limit=concurrency_count
            queue=False
        )

        clear_btn.click(
            clear_history,
            None,
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        )

        textbox.submit(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        ).then(
            http_bot,
            # [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, model_selector],
            [state, chatbot] + btn_list,
            # concurrency_limit=concurrency_count
        )

        submit_btn.click(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        ).then(
            http_bot,
            # [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, model_selector],
            [state, chatbot] + btn_list,
            # concurrency_limit=concurrency_count
            queue=False
        )

        if args.model_list_mode == "once":
            demo.load(
                load_demo,
                [url_params],
                [state, model_selector],
                js=get_window_url_params  # gradio 4.x renamed _js= to js=
            )
        elif args.model_list_mode == "reload":
            demo.load(
                load_demo_refresh_model_list,
                None,
                [state, model_selector],
                queue=False
            )
        else:
            raise ValueError(f"Unknown model list mode: {args.model_list_mode}")

    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("--host", type=str, default="0.0.0.0")
    # parser.add_argument("--port", type=int)
    # parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
    parser.add_argument("--concurrency-count", type=int, default=16)
    parser.add_argument("--model-list-mode", type=str, default="reload",
                        choices=["once", "reload"])
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--moderate", action="store_true")
    parser.add_argument("--embed", action="store_true")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    logger.info(args)
    demo = build_demo(args.embed)
    demo.queue(
        api_open=False
    ).launch(
        # server_name=args.host,
        # server_port=args.port,
        share=args.share
    )
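The post-processing step in `http_bot` is easy to test in isolation: every decimal literal in the model's reply is collected with `re.findall(r"\d+\.\d+", ...)`, read four at a time as normalized coordinates, scaled to pixel space, and drawn with PIL. A minimal, self-contained sketch of that step, using a synthetic reply string in place of real model output:

```python
import re
from PIL import Image, ImageDraw

# Synthetic model reply; in app.py this comes from llm.create_chat_completion().
reply = "Two regions: [0.10, 0.20, 0.45, 0.60] and [0.50, 0.55, 0.90, 0.95]."
canvas = Image.new("RGB", (640, 480), "white")

# Same parsing rule as http_bot: every decimal literal is a coordinate,
# consumed in groups of four as normalized (x1, y1, x2, y2) corners.
coords = [float(v) for v in re.findall(r"\d+\.\d+", reply)]
draw = ImageDraw.Draw(canvas)
width, height = canvas.size
for i in range(0, len(coords) - 3, 4):
    x1, y1, x2, y2 = coords[i:i + 4]
    draw.rectangle([(x1 * width, y1 * height), (x2 * width, y2 * height)],
                   outline="red")

canvas.save("boxes.png")
```

One thing to watch: the example prompts ask the model for (x,y,w,h) boxes, while the drawing code treats each quadruple as two corner points (x1, y1, x2, y2). If the model actually answers in width/height form, the rectangles will land in the wrong places, so one of the two conventions has to give.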
constants.py
ADDED
@@ -0,0 +1,27 @@
CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15

LOGDIR = "."

# Model Constants
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
PREDICT_TOKEN_INDEX = -300
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
IMAGE_PLACEHOLDER = "<image-placeholder>"
DEFAULT_PREDICT_TOKEN = "<predict>"

DESCRIPT_PROMPT = [
    "Describe this image thoroughly.",
    "Provide a detailed description in this picture.",
    "Detail every aspect of what's in this picture.",
    "Explain this image with precision and detail.",
    "Give a comprehensive description of this visual.",
    "Elaborate on the specifics within this image.",
    "Offer a detailed account of this picture's contents.",
    "Describe in detail what this image portrays.",
    "Break down this image into detailed descriptions.",
    "Provide a thorough description of the elements in this image."]
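Only LOGDIR is imported by app.py above; the remaining constants follow the LLaVA conventions, where DESCRIPT_PROMPT is a pool of caption instructions sampled at data-generation time and DEFAULT_IMAGE_TOKEN marks where image features are spliced into a prompt. A hypothetical sketch of how such a pool is typically consumed (the `build_caption_prompt` helper is illustrative, not part of this repo):

```python
import random

from constants import DEFAULT_IMAGE_TOKEN, DESCRIPT_PROMPT

def build_caption_prompt() -> str:
    # Hypothetical helper: pick one instruction at random and prepend the
    # image token, mirroring how LLaVA-style pipelines vary caption prompts.
    instruction = random.choice(DESCRIPT_PROMPT)
    return f"{DEFAULT_IMAGE_TOKEN}\n{instruction}"

print(build_caption_prompt())
```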
conversation.py
ADDED
@@ -0,0 +1,460 @@
import dataclasses
from enum import auto, Enum
from typing import List, Tuple
import base64
from io import BytesIO
from PIL import Image


class SeparatorStyle(Enum):
    """Different separator style."""
    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    LLAMA_2 = auto()
    GEMMA = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    version: str = "Unknown"

    skip_next: bool = False

    def get_prompt(self):
        messages = self.messages
        if len(messages) > 0 and type(messages[0][1]) is tuple:
            messages = self.messages.copy()
            init_role, init_msg = messages[0].copy()
            init_msg = init_msg[0].replace("<image>", "").strip()
            if 'mmtag' in self.version:
                messages[0] = (init_role, init_msg)
                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
                messages.insert(1, (self.roles[1], "Received."))
            else:
                messages[0] = (init_role, "<image>\n" + init_msg)

        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message = message[0]
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message = message[0]
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.MPT:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message = message[0]
                    ret += role + message + self.sep
                else:
                    ret += role
        elif self.sep_style == SeparatorStyle.LLAMA_2:
            wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
            wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
            ret = ""

            for i, (role, message) in enumerate(messages):
                if i == 0:
                    assert message, "first message should not be none"
                    assert role == self.roles[0], "first message should come from user"
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i == 0: message = wrap_sys(self.system) + message
                    if i % 2 == 0:
                        message = wrap_inst(message)
                        ret += self.sep + message
                    else:
                        ret += " " + message + " " + self.sep2
                else:
                    ret += ""
            ret = ret.lstrip(self.sep)
        elif self.sep_style == SeparatorStyle.GEMMA:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += "<start_of_turn>" + role + "\n" + message + "<end_of_turn>\n" + seps[i % 2]
                else:
                    ret += "<start_of_turn>" + role + "\n"
        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += message + seps[i % 2]
                else:
                    ret += ""
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

        return ret

    def append_message(self, role, message):
        self.messages.append([role, message])

    def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=1344, min_len=672):
        if image_process_mode == "Pad":
            def expand2square(pil_img, background_color=(122, 116, 104)):
                width, height = pil_img.size
                if width == height:
                    return pil_img
                elif width > height:
                    result = Image.new(pil_img.mode, (width, width), background_color)
                    result.paste(pil_img, (0, (width - height) // 2))
                    return result
                else:
                    result = Image.new(pil_img.mode, (height, height), background_color)
                    result.paste(pil_img, ((height - width) // 2, 0))
                    return result
            image = expand2square(image)
        elif image_process_mode in ["Default", "Crop"]:
            pass
        elif image_process_mode == "Resize":
            image = image.resize((336, 336))
        else:
            raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
        if max(image.size) > max_len:
            max_hw, min_hw = max(image.size), min(image.size)
            aspect_ratio = max_hw / min_hw
            shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
            longest_edge = int(shortest_edge * aspect_ratio)
            W, H = image.size
            if H > W:
                H, W = longest_edge, shortest_edge
            else:
                H, W = shortest_edge, longest_edge
            image = image.resize((W, H))
        if return_pil:
            return image
        else:
            buffered = BytesIO()
            image.save(buffered, format=image_format)
            img_b64_str = base64.b64encode(buffered.getvalue()).decode()
            return img_b64_str

    def get_images(self, return_pil=False):
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    image = self.process_image(image, image_process_mode, return_pil=return_pil)
                    images.append(image)
        return images

    def to_gradio_chatbot(self):
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    img_b64_str = self.process_image(
                        image, "Default", return_pil=False,
                        image_format='JPEG')
                    img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
                    msg = img_str + msg.replace('<image>', '').strip()
                    ret.append([msg, None])
                else:
                    ret.append([msg, None])
            else:
                if type(msg) is tuple and len(msg) == 2:
                    msg, img_b64_str = msg
                    img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
                    msg = msg.strip() + img_str
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            version=self.version)

    def dict(self):
        if len(self.get_images()) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }


conv_vicuna_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
        ("Assistant",
         "Renewable energy sources are those that can be replenished naturally in a relatively "
         "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
         "Non-renewable energy sources, on the other hand, are finite and will eventually be "
         "depleted, such as coal, oil, and natural gas. Here are some key differences between "
         "renewable and non-renewable energy sources:\n"
         "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
         "energy sources are finite and will eventually run out.\n"
         "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
         "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
         "and other negative effects.\n"
         "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
         "have lower operational costs than non-renewable sources.\n"
         "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
         "locations than non-renewable sources.\n"
         "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
         "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
         "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
         "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
    ),
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_vicuna_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llama_2 = Conversation(
    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_llava_llama_2 = Conversation(
    system="You are a helpful language and vision assistant. "
           "You are able to understand the visual content that the user provides, "
           "and assist the user with a variety of tasks using natural language.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_mpt = Conversation(
    system="""<|im_start|>system
A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

conv_llava_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_llava_v0_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
    version="v0_mmtag",
)

conv_llava_v1 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_vicuna_imgsp_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="imgsp_v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llava_plain_guided = Conversation(
    system="",
    roles=("", ""),
    version="plain_guided",
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

conv_llava_v1_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
    version="v1_mmtag",
)

conv_phi_2 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="phi2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="<|endoftext|>",
)

conv_mistral_instruct = Conversation(
    system="",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_gemma = Conversation(
    system="",
    roles=("user", "model"),
    version="gemma",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.GEMMA,
    sep="",
    sep2="<eos>",
)

conv_chatml_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

default_conversation = conv_vicuna_v1
conv_templates = {
    "default": conv_vicuna_v0,
    "v0": conv_vicuna_v0,
    "v1": conv_vicuna_v1,
    "vicuna_v1": conv_vicuna_v1,
    "phi_2": conv_phi_2,
    "gemma": conv_gemma,
    "llama_2": conv_llama_2,
    "imgsp_v1": conv_vicuna_imgsp_v1,
    "plain_guided": conv_llava_plain_guided,
    "mistral_instruct": conv_mistral_instruct,
    "chatml_direct": conv_chatml_direct,
    "mistral_direct": conv_chatml_direct,
    "plain": conv_llava_plain,
    "v0_plain": conv_llava_plain,
    "llava_v0": conv_llava_v0,
    "v0_mmtag": conv_llava_v0_mmtag,
    "llava_v1": conv_llava_v1,
    "v1_mmtag": conv_llava_v1_mmtag,
    "llava_llama_2": conv_llava_llama_2,

    "mpt": conv_mpt,
}


if __name__ == "__main__":
    print(default_conversation.get_prompt())
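A quick way to see what these templates produce: copy one, append a user/assistant turn, and print the rendered prompt. The snippet below exercises the vicuna_v1 template (SeparatorStyle.TWO), assuming it is run next to conversation.py:

```python
from conversation import conv_templates

# Copy so the shared template object is not mutated.
conv = conv_templates["vicuna_v1"].copy()
conv.append_message(conv.roles[0], "What is in this image?")
conv.append_message(conv.roles[1], None)  # None leaves the assistant slot open

# TWO-style rendering alternates sep (" ") and sep2 ("</s>") after each turn,
# ending with a bare "ASSISTANT:" so the model continues from there.
print(conv.get_prompt())
# ... USER: What is in this image? ASSISTANT:
```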
examples/0.jpg
ADDED
examples/1.jpg
ADDED
examples/2.jpg
ADDED
gradio_web_server.log
ADDED
@@ -0,0 +1,905 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
2024-06-02 03:26:14 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
2 |
+
2024-06-02 03:26:14 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
3 |
+
2024-06-02 03:26:15 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
4 |
+
2024-06-02 03:26:15 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
5 |
+
2024-06-02 03:26:15 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
6 |
+
2024-06-02 03:26:15 | INFO | stdout | --------
|
7 |
+
2024-06-02 03:26:15 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
8 |
+
2024-06-02 03:26:19 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
9 |
+
2024-06-02 03:26:19 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
10 |
+
2024-06-02 03:26:19 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
11 |
+
2024-06-02 03:26:19 | INFO | stdout |
|
12 |
+
2024-06-02 03:26:19 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
13 |
+
2024-06-02 03:26:20 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
14 |
+
2024-06-02 03:31:49 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
15 |
+
2024-06-02 03:32:00 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
16 |
+
2024-06-02 03:32:00 | ERROR | stderr | Traceback (most recent call last):
|
17 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
|
18 |
+
2024-06-02 03:32:00 | ERROR | stderr | output = await route_utils.call_process_api(
|
19 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
|
20 |
+
2024-06-02 03:32:00 | ERROR | stderr | output = await app.get_blocks().process_api(
|
21 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
|
22 |
+
2024-06-02 03:32:00 | ERROR | stderr | result = await self.call_function(
|
23 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
|
24 |
+
2024-06-02 03:32:00 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
|
25 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
|
26 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
|
27 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
|
28 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await future
|
29 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
|
30 |
+
2024-06-02 03:32:00 | ERROR | stderr | result = context.run(func, *args)
|
31 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
|
32 |
+
2024-06-02 03:32:00 | ERROR | stderr | response = f(*args, **kwargs)
|
33 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
|
34 |
+
2024-06-02 03:32:00 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
|
35 |
+
2024-06-02 03:32:00 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
|
36 |
+
2024-06-02 03:32:00 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
37 |
+
2024-06-02 03:32:00 | ERROR | stderr | Traceback (most recent call last):
|
38 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
|
39 |
+
2024-06-02 03:32:00 | ERROR | stderr | output = await route_utils.call_process_api(
|
40 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
|
41 |
+
2024-06-02 03:32:00 | ERROR | stderr | output = await app.get_blocks().process_api(
|
42 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
|
43 |
+
2024-06-02 03:32:00 | ERROR | stderr | result = await self.call_function(
|
44 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
|
45 |
+
2024-06-02 03:32:00 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
|
46 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
|
47 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await iterator.__anext__()
|
48 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
|
49 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await anyio.to_thread.run_sync(
|
50 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
|
51 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
|
52 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
|
53 |
+
2024-06-02 03:32:00 | ERROR | stderr | return await future
|
54 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
|
55 |
+
2024-06-02 03:32:00 | ERROR | stderr | result = context.run(func, *args)
|
56 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
|
57 |
+
2024-06-02 03:32:00 | ERROR | stderr | return next(iterator)
|
58 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
|
59 |
+
2024-06-02 03:32:00 | ERROR | stderr | response = next(iterator)
|
60 |
+
2024-06-02 03:32:00 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 162, in http_bot
|
61 |
+
2024-06-02 03:32:00 | ERROR | stderr | prompt = state.get_prompt()
|
62 |
+
2024-06-02 03:32:00 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
|
63 |
+
2024-06-02 03:34:31 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
64 |
+
2024-06-02 03:35:14 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
65 |
+
2024-06-02 03:35:49 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
66 |
+
2024-06-02 03:36:04 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
67 |
+
2024-06-02 03:36:04 | ERROR | stderr | Traceback (most recent call last):
|
68 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
|
69 |
+
2024-06-02 03:36:04 | ERROR | stderr | output = await route_utils.call_process_api(
|
70 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
|
71 |
+
2024-06-02 03:36:04 | ERROR | stderr | output = await app.get_blocks().process_api(
|
72 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
|
73 |
+
2024-06-02 03:36:04 | ERROR | stderr | result = await self.call_function(
|
74 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
|
75 |
+
2024-06-02 03:36:04 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
|
76 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
|
77 |
+
2024-06-02 03:36:04 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
|
78 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
|
79 |
+
2024-06-02 03:36:04 | ERROR | stderr | return await future
|
80 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
|
81 |
+
2024-06-02 03:36:04 | ERROR | stderr | result = context.run(func, *args)
|
82 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
|
83 |
+
2024-06-02 03:36:04 | ERROR | stderr | response = f(*args, **kwargs)
|
84 |
+
2024-06-02 03:36:04 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
|
85 |
+
2024-06-02 03:36:04 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
|
86 |
+
2024-06-02 03:36:04 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
|
87 |
+
2024-06-02 03:36:04 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 03:36:04 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:36:04 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:36:04 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:36:04 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 03:36:04 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 03:36:04 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 03:36:04 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:36:04 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:36:04 | ERROR | stderr | return await future
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:36:04 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 03:36:04 | ERROR | stderr | return next(iterator)
2024-06-02 03:36:04 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 03:36:04 | ERROR | stderr | response = next(iterator)
2024-06-02 03:36:04 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 162, in http_bot
2024-06-02 03:36:04 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 03:36:04 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
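`http_bot` (app.py, line 162) fails the same way in the same request, which points at one shared root cause rather than two bugs: whatever callback populates the `state` component returns two values that Gradio stores as a single tuple. In Gradio, a handler returning N values must list N components in `outputs`; a self-contained sketch of the corrected wiring (all names besides `state` and `model_selector`, which appear later in this log, are assumptions):

```python
import gradio as gr

def load_demo():
    # Returns TWO values, so the event below must name TWO output components;
    # otherwise the whole tuple is stored in `state`.
    conversation = {"messages": []}  # stand-in for the real Conversation object
    dropdown_update = gr.Dropdown(choices=["llava-v1"], value="llava-v1")
    return conversation, dropdown_update

with gr.Blocks() as demo:
    state = gr.State()
    model_selector = gr.Dropdown(choices=["llava-v1"], label="Model")
    demo.load(load_demo, inputs=None, outputs=[state, model_selector])
```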
2024-06-02 03:39:36 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:39:40 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 03:39:41 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:39:41 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\linecache.py", line 46, in getlines
2024-06-02 03:39:41 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\linecache.py", line 136, in updatecache
2024-06-02 03:39:41 | ERROR | stderr | with tokenize.open(fullname) as fp:
2024-06-02 03:39:41 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\tokenize.py", line 394, in open
2024-06-02 03:39:42 | ERROR | stderr |
2024-06-02 03:39:42 | ERROR | stderr | Original exception was:
2024-06-02 03:39:42 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:39:42 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
2024-06-02 03:39:42 | ERROR | stderr | time.sleep(0.1)
2024-06-02 03:39:42 | ERROR | stderr | KeyboardInterrupt
2024-06-02 03:39:42 | ERROR | stderr |
2024-06-02 03:39:42 | ERROR | stderr | During handling of the above exception, another exception occurred:
2024-06-02 03:39:42 | ERROR | stderr |
2024-06-02 03:39:42 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:39:42 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 350, in <module>
2024-06-02 03:39:42 | ERROR | stderr |
2024-06-02 03:39:42 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
2024-06-02 03:39:42 | ERROR | stderr | self.block_thread()
2024-06-02 03:39:42 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
2024-06-02 03:39:42 | ERROR | stderr | self.server.close()
2024-06-02 03:39:42 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
2024-06-02 03:39:42 | ERROR | stderr | self.thread.join()
2024-06-02 03:39:42 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
2024-06-02 03:39:42 | ERROR | stderr | self._wait_for_tstate_lock()
2024-06-02 03:39:42 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
2024-06-02 03:39:42 | ERROR | stderr | if lock.acquire(block, timeout):
2024-06-02 03:39:42 | ERROR | stderr | KeyboardInterrupt
2024-06-02 03:41:40 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:41:40 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:41:40 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 03:41:41 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 03:41:41 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 03:41:41 | INFO | stdout | --------
2024-06-02 03:41:41 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 03:41:47 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
2024-06-02 03:41:47 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
2024-06-02 03:41:47 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
2024-06-02 03:41:47 | INFO | stdout |
2024-06-02 03:41:47 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 03:41:49 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 03:43:21 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:43:21 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:43:30 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 03:43:30 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:43:30 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:43:30 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:43:30 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 03:43:30 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:43:30 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:43:30 | ERROR | stderr | return await future
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:43:30 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 03:43:30 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
2024-06-02 03:43:30 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
2024-06-02 03:43:30 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
2024-06-02 03:43:30 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 03:43:30 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:43:30 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:43:30 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:43:30 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 03:43:30 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 03:43:30 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 03:43:30 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:43:30 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:43:30 | ERROR | stderr | return await future
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:43:30 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 03:43:30 | ERROR | stderr | return next(iterator)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 03:43:30 | ERROR | stderr | response = next(iterator)
2024-06-02 03:43:30 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 162, in http_bot
2024-06-02 03:43:30 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 03:43:30 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
2024-06-02 03:43:52 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:43:56 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:44:07 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 03:45:06 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:45:06 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:45:06 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 03:45:06 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
2024-06-02 03:45:06 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
2024-06-02 03:45:06 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
2024-06-02 03:45:06 | INFO | stdout |
2024-06-02 03:45:06 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 03:45:06 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 03:45:06 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 03:45:06 | INFO | stdout | --------
2024-06-02 03:45:07 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 03:45:07 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 03:45:49 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:45:52 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 03:45:52 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:45:52 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:45:52 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:45:52 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 03:45:52 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:45:52 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:45:52 | ERROR | stderr | return await future
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:45:52 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 03:45:52 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
2024-06-02 03:45:52 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
2024-06-02 03:45:52 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
2024-06-02 03:45:52 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 03:45:52 | INFO | stdout | (Conversation(system="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", roles=('USER', 'ASSISTANT'), messages=[], offset=0, sep_style=<SeparatorStyle.TWO: 2>, sep=' ', sep2='</s>', version='v1', skip_next=False), <gradio.components.dropdown.Dropdown object at 0x000002E97F9A7DC0>)
2024-06-02 03:45:52 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:45:52 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:45:52 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:45:52 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 03:45:52 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 03:45:52 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 03:45:52 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:45:52 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:45:52 | ERROR | stderr | return await future
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:45:52 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 03:45:52 | ERROR | stderr | return next(iterator)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 03:45:52 | ERROR | stderr | response = next(iterator)
2024-06-02 03:45:52 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 164, in http_bot
2024-06-02 03:45:52 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 03:45:52 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
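The stdout line above, printed just before the traceback, confirms the diagnosis: `state` holds `(Conversation(...), <gradio.components.dropdown.Dropdown ...>)`, a two-element tuple whose first element is the Conversation. Until the wiring is fixed, a stopgap in `http_bot` could unwrap it before use (a sketch under that assumption; the streaming body is elided):

```python
# Hedged stopgap for http_bot: recover the Conversation from the tuple that
# the broken event wiring stored in `state`, then proceed as before.
def http_bot(state):
    if isinstance(state, tuple):  # shape seen in the stdout dump above
        state = state[0]
    prompt = state.get_prompt()
    yield state, prompt  # placeholder for the real streaming response loop
```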
2024-06-02 03:49:52 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 03:49:53 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:49:53 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
2024-06-02 03:49:53 | ERROR | stderr | time.sleep(0.1)
2024-06-02 03:49:53 | ERROR | stderr | KeyboardInterrupt
2024-06-02 03:49:53 | ERROR | stderr |
2024-06-02 03:49:53 | ERROR | stderr | During handling of the above exception, another exception occurred:
2024-06-02 03:49:53 | ERROR | stderr |
2024-06-02 03:49:53 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:49:53 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 351, in <module>
2024-06-02 03:49:53 | ERROR | stderr | parser.add_argument("--concurrency-count", type=int, default=16)
2024-06-02 03:49:53 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
2024-06-02 03:49:53 | ERROR | stderr | self.block_thread()
2024-06-02 03:49:53 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
2024-06-02 03:49:53 | ERROR | stderr | self.server.close()
2024-06-02 03:49:53 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
2024-06-02 03:49:53 | ERROR | stderr | self.thread.join()
2024-06-02 03:49:53 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
2024-06-02 03:49:53 | ERROR | stderr | self._wait_for_tstate_lock()
2024-06-02 03:49:53 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
2024-06-02 03:49:53 | ERROR | stderr | if lock.acquire(block, timeout):
2024-06-02 03:49:53 | ERROR | stderr | KeyboardInterrupt
2024-06-02 03:50:34 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:50:34 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 03:50:34 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 03:50:34 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
2024-06-02 03:50:34 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
2024-06-02 03:50:34 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
2024-06-02 03:50:34 | INFO | stdout |
2024-06-02 03:50:34 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 03:50:35 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 03:50:35 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 03:50:35 | INFO | stdout | --------
2024-06-02 03:50:35 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 03:50:35 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 03:51:21 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:51:22 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:51:24 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 03:51:24 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:51:24 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:51:24 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:51:24 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 03:51:24 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:51:24 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:51:24 | ERROR | stderr | return await future
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:51:24 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 03:51:24 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
2024-06-02 03:51:24 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
2024-06-02 03:51:24 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
2024-06-02 03:51:24 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 03:51:24 | INFO | stdout | (Conversation(system="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", roles=('USER', 'ASSISTANT'), messages=[], offset=0, sep_style=<SeparatorStyle.TWO: 2>, sep=' ', sep2='</s>', version='v1', skip_next=False), <gradio.components.dropdown.Dropdown object at 0x0000017EDFB595A0>)
2024-06-02 03:51:24 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:51:24 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:51:24 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:51:24 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 03:51:24 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 03:51:24 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 03:51:24 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:51:24 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:51:24 | ERROR | stderr | return await future
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:51:24 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 03:51:24 | ERROR | stderr | return next(iterator)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 03:51:24 | ERROR | stderr | response = next(iterator)
2024-06-02 03:51:24 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 164, in http_bot
2024-06-02 03:51:24 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 03:51:24 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
2024-06-02 03:55:17 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 03:55:53 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 03:55:54 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:55:54 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:55:54 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:55:54 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 03:55:54 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:55:54 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:55:54 | ERROR | stderr | return await future
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:55:54 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 03:55:54 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
2024-06-02 03:55:54 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
2024-06-02 03:55:54 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
2024-06-02 03:55:54 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 03:55:54 | INFO | stdout | (Conversation(system="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", roles=('USER', 'ASSISTANT'), messages=[], offset=0, sep_style=<SeparatorStyle.TWO: 2>, sep=' ', sep2='</s>', version='v1', skip_next=False), <gradio.components.dropdown.Dropdown object at 0x0000017EDFB5B130>)
2024-06-02 03:55:54 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 03:55:54 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 03:55:54 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 03:55:54 | ERROR | stderr | result = await self.call_function(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 03:55:54 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 03:55:54 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 03:55:54 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 03:55:54 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 03:55:54 | ERROR | stderr | return await future
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 03:55:54 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 03:55:54 | ERROR | stderr | return next(iterator)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 03:55:54 | ERROR | stderr | response = next(iterator)
2024-06-02 03:55:54 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 164, in http_bot
2024-06-02 03:55:54 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 03:55:54 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
2024-06-02 04:00:23 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 04:01:03 | INFO | gradio_web_server | args: Namespace(host='0.0.0.0', concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:01:03 | INFO | gradio_web_server | Namespace(host='0.0.0.0', concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:01:04 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 04:01:04 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
2024-06-02 04:01:04 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
2024-06-02 04:01:04 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
2024-06-02 04:01:04 | INFO | stdout |
2024-06-02 04:01:04 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 04:01:04 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 04:01:04 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 04:01:04 | INFO | stdout | --------
2024-06-02 04:01:04 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 04:01:05 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 04:01:19 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 04:01:23 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 6
2024-06-02 04:01:23 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 04:01:23 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 04:01:23 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 04:01:23 | ERROR | stderr | result = await self.call_function(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 04:01:23 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 04:01:23 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 04:01:23 | ERROR | stderr | return await future
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 04:01:23 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 04:01:23 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 148, in add_text
2024-06-02 04:01:23 | ERROR | stderr | state.append_message(state.roles[0], text)
2024-06-02 04:01:23 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'append_message'
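With a shorter message (len: 6) the same tuple now trips on `append_message` instead of `get_images`, so every Conversation method call in `add_text` is exposed. Instead of silently unwrapping in each handler, failing fast with an explicit message makes the wiring bug obvious the first time it fires (hypothetical sketch):

```python
# Hypothetical fail-fast check for every state-consuming handler.
def require_conversation(state):
    assert not isinstance(state, tuple), (
        "state arrived as a tuple - the callback that produced it probably "
        "returns more values than its `outputs` list declares"
    )
    return state
```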
2024-06-02 04:01:23 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 04:01:23 | INFO | stdout | (Conversation(system="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", roles=('USER', 'ASSISTANT'), messages=[], offset=0, sep_style=<SeparatorStyle.TWO: 2>, sep=' ', sep2='</s>', version='v1', skip_next=False), <gradio.components.dropdown.Dropdown object at 0x00000284FF9C7A90>)
2024-06-02 04:01:23 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 04:01:23 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 04:01:23 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 04:01:23 | ERROR | stderr | result = await self.call_function(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 04:01:23 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 04:01:23 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 04:01:23 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 04:01:23 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 04:01:23 | ERROR | stderr | return await future
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 04:01:23 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 04:01:23 | ERROR | stderr | return next(iterator)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 04:01:23 | ERROR | stderr | response = next(iterator)
2024-06-02 04:01:23 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 162, in http_bot
2024-06-02 04:01:23 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 04:01:23 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
2024-06-02 04:01:26 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 04:01:26 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 04:01:26 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 04:01:26 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 04:01:26 | ERROR | stderr | result = await self.call_function(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1255, in call_function
2024-06-02 04:01:26 | ERROR | stderr | prediction = await anyio.to_thread.run_sync(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 04:01:26 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 04:01:26 | ERROR | stderr | return await future
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 04:01:26 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 750, in wrapper
2024-06-02 04:01:26 | ERROR | stderr | response = f(*args, **kwargs)
2024-06-02 04:01:26 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 146, in add_text
2024-06-02 04:01:26 | ERROR | stderr | if len(state.get_images(return_pil=True)) > 0:
2024-06-02 04:01:26 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_images'
2024-06-02 04:01:26 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 04:01:26 | INFO | stdout | (Conversation(system="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", roles=('USER', 'ASSISTANT'), messages=[], offset=0, sep_style=<SeparatorStyle.TWO: 2>, sep=' ', sep2='</s>', version='v1', skip_next=False), <gradio.components.dropdown.Dropdown object at 0x00000284FF9C7A90>)
2024-06-02 04:01:26 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\routes.py", line 621, in predict
2024-06-02 04:01:26 | ERROR | stderr | output = await route_utils.call_process_api(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\route_utils.py", line 260, in call_process_api
2024-06-02 04:01:26 | ERROR | stderr | output = await app.get_blocks().process_api(
2024-06-02 04:01:26 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1689, in process_api
2024-06-02 04:01:26 | ERROR | stderr | result = await self.call_function(
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 1267, in call_function
2024-06-02 04:01:27 | ERROR | stderr | prediction = await utils.async_iteration(iterator)
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 574, in async_iteration
2024-06-02 04:01:27 | ERROR | stderr | return await iterator.__anext__()
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 567, in __anext__
2024-06-02 04:01:27 | ERROR | stderr | return await anyio.to_thread.run_sync(
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
2024-06-02 04:01:27 | ERROR | stderr | return await get_async_backend().run_sync_in_worker_thread(
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 2144, in run_sync_in_worker_thread
2024-06-02 04:01:27 | ERROR | stderr | return await future
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\anyio\_backends\_asyncio.py", line 851, in run
2024-06-02 04:01:27 | ERROR | stderr | result = context.run(func, *args)
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 550, in run_sync_iterator_async
2024-06-02 04:01:27 | ERROR | stderr | return next(iterator)
2024-06-02 04:01:27 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\utils.py", line 733, in gen_wrapper
2024-06-02 04:01:27 | ERROR | stderr | response = next(iterator)
2024-06-02 04:01:27 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 162, in http_bot
2024-06-02 04:01:27 | ERROR | stderr | prompt = state.get_prompt()
2024-06-02 04:01:27 | ERROR | stderr | AttributeError: 'tuple' object has no attribute 'get_prompt'
2024-06-02 04:01:46 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 04:11:09 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:11:09 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:11:09 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 04:11:09 | INFO | stdout | Running on local URL: http://127.0.0.1:7861
2024-06-02 04:11:09 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7861/startup-events "HTTP/1.1 200 OK"
2024-06-02 04:11:09 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 200 OK"
2024-06-02 04:11:09 | INFO | stdout |
2024-06-02 04:11:09 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 04:11:09 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 04:11:09 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 04:11:09 | INFO | stdout | --------
2024-06-02 04:11:10 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 04:11:10 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 04:11:28 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 04:11:31 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 04:11:31 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 04:11:35 | ERROR | gradio_web_server | unknown url type: ''
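After the handlers stop crashing, the request itself fails with `unknown url type: ''`, the message urllib raises when asked to open an empty URL; the worker address for the selected model has apparently resolved to an empty string. A guard along these lines would report the misconfiguration directly (`worker_addr` is an assumed name):

```python
import urllib.request

# Hypothetical guard: validate the worker address before opening it, since an
# empty string is exactly what produces ValueError("unknown url type: ''").
def query_worker(worker_addr: str, timeout: float = 30.0):
    if not worker_addr:
        raise ValueError("no worker address configured for the selected model")
    return urllib.request.urlopen(worker_addr, timeout=timeout)
```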
2024-06-02 04:12:06 | INFO | stdout | Keyboard interruption in main thread... closing server.
2024-06-02 04:12:07 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:12:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
2024-06-02 04:12:07 | ERROR | stderr | time.sleep(0.1)
2024-06-02 04:12:07 | ERROR | stderr | KeyboardInterrupt
2024-06-02 04:12:07 | ERROR | stderr |
2024-06-02 04:12:07 | ERROR | stderr | During handling of the above exception, another exception occurred:
2024-06-02 04:12:07 | ERROR | stderr |
2024-06-02 04:12:07 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:12:07 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 399, in <module>
2024-06-02 04:12:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
2024-06-02 04:12:07 | ERROR | stderr | self.block_thread()
2024-06-02 04:12:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
2024-06-02 04:12:07 | ERROR | stderr | self.server.close()
2024-06-02 04:12:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
2024-06-02 04:12:07 | ERROR | stderr | self.thread.join()
2024-06-02 04:12:07 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
2024-06-02 04:12:07 | ERROR | stderr | self._wait_for_tstate_lock()
2024-06-02 04:12:07 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
2024-06-02 04:12:07 | ERROR | stderr | if lock.acquire(block, timeout):
2024-06-02 04:12:07 | ERROR | stderr | KeyboardInterrupt
2024-06-02 04:13:42 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:13:42 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:13:42 | ERROR | stderr | Traceback (most recent call last):
2024-06-02 04:13:42 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 381, in <module>
2024-06-02 04:13:42 | ERROR | stderr | demo = build_demo(args.embed)
2024-06-02 04:13:42 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 281, in build_demo
2024-06-02 04:13:42 | ERROR | stderr | [state, model_selector],
2024-06-02 04:13:42 | ERROR | stderr | NameError: name 'model_selector' is not defined
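This NameError is a plain ordering bug: `build_demo` wires an event with `[state, model_selector]` (app.py, line 281) before `model_selector` is created. Defining the component ahead of any wiring that mentions it resolves it; a minimal skeleton (only `build_demo`, `state`, and `model_selector` come from the traceback, the rest is illustrative):

```python
import gradio as gr

def build_demo():
    with gr.Blocks() as demo:
        state = gr.State()
        # Create the component BEFORE any event lists it as input or output.
        model_selector = gr.Dropdown(choices=[], label="Model")
        textbox = gr.Textbox(label="Message")
        textbox.submit(lambda conv, model, text: conv,
                       [state, model_selector, textbox], [state])
    return demo
```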
2024-06-02 04:13:42 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 04:13:42 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 04:13:42 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 04:13:42 | INFO | stdout | --------
2024-06-02 04:13:43 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 04:14:33 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:14:33 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
2024-06-02 04:14:34 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
2024-06-02 04:14:34 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
2024-06-02 04:14:34 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
2024-06-02 04:14:34 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
2024-06-02 04:14:34 | INFO | stdout |
2024-06-02 04:14:34 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
2024-06-02 04:14:34 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
2024-06-02 04:14:34 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
2024-06-02 04:14:34 | INFO | stdout | --------
2024-06-02 04:14:35 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
2024-06-02 04:14:35 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
2024-06-02 04:14:46 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
2024-06-02 04:14:46 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
2024-06-02 04:14:46 | ERROR | stderr | warnings.warn(
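The UserWarning means the Dropdown was built with a `value` that is not among its `choices` - here seemingly an empty value against an empty model list. Seeding the value from the list, or allowing custom values as the warning itself suggests, avoids it (sketch; `models` is an assumed variable):

```python
import gradio as gr

models = []  # assumption: the model list was empty when the demo loaded
model_selector = gr.Dropdown(
    choices=models,
    value=models[0] if models else None,  # never a value outside `choices`
    allow_custom_value=True,              # or accept values not in the list
    label="Model",
)
```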
2024-06-02 04:15:07 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
2024-06-02 04:15:07 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
2024-06-02 04:18:09 | INFO | gradio_web_server | In this image, there is a man who appears to be in the middle of throwing a punch. He is located at the center of the frame, with his fist raised and his arm extended towards another person. The other person is positioned slightly to the left of the first man, with their arms raised in an attempt to block the incoming punch.

The background of the image features a parking lot filled with cars and a tree. There are no visible text or other discernible objects in the image. The relative positions of the two men suggest that they are engaged in some sort of altercation, possibly related to the car parked nearby. However, without additional context, it is difficult to determine the exact nature of their conflict.

To draw a bounding box for these two individuals, one could use the following coordinates: [0.32, 0.31, 0.78, 0.76]
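The first successful generation of the session. The coordinates the model reports are normalized `[x1, y1, x2, y2]` fractions of the image dimensions (all values lie in [0, 1]), so rendering the box means scaling by the pixel size first; a small Pillow sketch using the values from the reply above (everything else is illustrative):

```python
from PIL import Image, ImageDraw

def draw_normalized_box(image: Image.Image, box, outline="red", width=3):
    # box = [x1, y1, x2, y2] as fractions of the image width/height.
    w, h = image.size
    x1, y1, x2, y2 = box
    ImageDraw.Draw(image).rectangle(
        [x1 * w, y1 * h, x2 * w, y2 * h], outline=outline, width=width
    )
    return image

# e.g. draw_normalized_box(Image.open("examples/0.jpg"), [0.32, 0.31, 0.78, 0.76])
```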
2024-06-02 04:31:03 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
598 |
+
2024-06-02 04:31:54 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
599 |
+
2024-06-02 04:31:54 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
600 |
+
2024-06-02 04:31:54 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
601 |
+
2024-06-02 04:31:54 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
602 |
+
2024-06-02 04:31:54 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
603 |
+
2024-06-02 04:31:54 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
604 |
+
2024-06-02 04:31:54 | INFO | stdout |
|
605 |
+
2024-06-02 04:31:54 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
606 |
+
2024-06-02 04:31:54 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
607 |
+
2024-06-02 04:31:54 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
608 |
+
2024-06-02 04:31:54 | INFO | stdout | --------
|
609 |
+
2024-06-02 04:31:55 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
610 |
+
2024-06-02 04:31:55 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
611 |
+
2024-06-02 04:32:41 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
612 |
+
2024-06-02 04:32:41 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
613 |
+
2024-06-02 04:32:41 | ERROR | stderr | warnings.warn(
|
614 |
+
2024-06-02 04:32:46 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
615 |
+
2024-06-02 04:32:46 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
616 |
+
2024-06-02 04:35:20 | INFO | stdout | In the image, there is a man who appears to be hitting a woman with his fist. The woman has visible bruises on her face. The man's hand is located at approximately (0.32, 0.31, 0.48, 0.56) and his arm is extended towards the woman. The woman's face shows signs of injury with a noticeable bruise under her eye and another on her cheek. The man's fist is located at approximately (0.29, 0.31, 0.48, 0.56) and his arm is extended towards the woman. The woman's face shows signs of injury with a noticeable bruise under her eye and another on her cheek.
|
617 |
+
2024-06-02 04:35:20 | ERROR | gradio_web_server | 'str' object has no attribute 'size'
|
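The `'str' object has no attribute 'size'` above means a plain string (most likely an image path, or the model's text reply) reached code that expected a `PIL.Image` before `.size` was read. A minimal sketch of a guard that would catch this; `ensure_pil` and the path are illustrative, not names from this repo:

```python
from PIL import Image

def ensure_pil(image):
    # Open path strings lazily; anything else must already be a PIL image.
    if isinstance(image, str):
        image = Image.open(image)
    if not isinstance(image, Image.Image):
        raise TypeError(f"expected PIL.Image or path, got {type(image).__name__}")
    return image

width, height = ensure_pil("input.jpg").size  # placeholder path
```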
618 |
+
2024-06-02 04:36:36 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
619 |
+
2024-06-02 04:36:36 | ERROR | stderr | Traceback (most recent call last):
|
620 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
|
621 |
+
2024-06-02 04:36:36 | ERROR | stderr | time.sleep(0.1)
|
622 |
+
2024-06-02 04:36:36 | ERROR | stderr | KeyboardInterrupt
|
623 |
+
2024-06-02 04:36:36 | ERROR | stderr |
|
624 |
+
2024-06-02 04:36:36 | ERROR | stderr | During handling of the above exception, another exception occurred:
|
625 |
+
2024-06-02 04:36:36 | ERROR | stderr |
|
626 |
+
2024-06-02 04:36:36 | ERROR | stderr | Traceback (most recent call last):
|
627 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 403, in <module>
|
628 |
+
2024-06-02 04:36:36 | ERROR | stderr | demo.queue(
|
629 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
|
630 |
+
2024-06-02 04:36:36 | ERROR | stderr | self.block_thread()
|
631 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
|
632 |
+
2024-06-02 04:36:36 | ERROR | stderr | self.server.close()
|
633 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
|
634 |
+
2024-06-02 04:36:36 | ERROR | stderr | self.thread.join()
|
635 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
|
636 |
+
2024-06-02 04:36:36 | ERROR | stderr | self._wait_for_tstate_lock()
|
637 |
+
2024-06-02 04:36:36 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
|
638 |
+
2024-06-02 04:36:36 | ERROR | stderr | if lock.acquire(block, timeout):
|
639 |
+
2024-06-02 04:36:36 | ERROR | stderr | KeyboardInterrupt
|
640 |
+
2024-06-02 04:37:18 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
641 |
+
2024-06-02 04:37:18 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
642 |
+
2024-06-02 04:37:18 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
643 |
+
2024-06-02 04:37:18 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
644 |
+
2024-06-02 04:37:18 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
645 |
+
2024-06-02 04:37:18 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
646 |
+
2024-06-02 04:37:18 | INFO | stdout |
|
647 |
+
2024-06-02 04:37:18 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
648 |
+
2024-06-02 04:37:19 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
649 |
+
2024-06-02 04:37:19 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
650 |
+
2024-06-02 04:37:19 | INFO | stdout | --------
|
651 |
+
2024-06-02 04:37:19 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
652 |
+
2024-06-02 04:37:19 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
653 |
+
2024-06-02 04:37:27 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
654 |
+
2024-06-02 04:37:27 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
655 |
+
2024-06-02 04:37:27 | ERROR | stderr | warnings.warn(
|
656 |
+
2024-06-02 04:37:29 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
657 |
+
2024-06-02 04:37:29 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
658 |
+
2024-06-02 04:39:37 | INFO | stdout | In this image, there is a man holding a knife with both hands. The knife has a red handle and a silver blade. The man's face appears to be distorted or blurred out. Here are the coordinates for drawing a bounding box around the knife:
|
659 |
+
2024-06-02 04:39:37 | INFO | stdout | ```css
|
660 |
+
2024-06-02 04:39:37 | INFO | stdout | [0.1, 0.26, 0.34, 0.45]
|
661 |
+
2024-06-02 04:39:37 | INFO | stdout | ```
|
662 |
+
2024-06-02 04:39:37 | INFO | stdout | The man is wearing a black jacket and has dark hair. The background of the image features a building with columns and a blue sky.
|
663 |
+
2024-06-02 04:39:37 | ERROR | gradio_web_server | 'str' object has no attribute 'rectangle'
|
664 |
+
2024-06-02 04:40:07 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
665 |
+
2024-06-02 04:40:07 | ERROR | stderr | Traceback (most recent call last):
|
666 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
|
667 |
+
2024-06-02 04:40:07 | ERROR | stderr | time.sleep(0.1)
|
668 |
+
2024-06-02 04:40:07 | ERROR | stderr | KeyboardInterrupt
|
669 |
+
2024-06-02 04:40:07 | ERROR | stderr |
|
670 |
+
2024-06-02 04:40:07 | ERROR | stderr | During handling of the above exception, another exception occurred:
|
671 |
+
2024-06-02 04:40:07 | ERROR | stderr |
|
672 |
+
2024-06-02 04:40:07 | ERROR | stderr | Traceback (most recent call last):
|
673 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 403, in <module>
|
674 |
+
2024-06-02 04:40:07 | ERROR | stderr | demo.queue(
|
675 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
|
676 |
+
2024-06-02 04:40:07 | ERROR | stderr | self.block_thread()
|
677 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
|
678 |
+
2024-06-02 04:40:07 | ERROR | stderr | self.server.close()
|
679 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
|
680 |
+
2024-06-02 04:40:07 | ERROR | stderr | self.thread.join()
|
681 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
|
682 |
+
2024-06-02 04:40:07 | ERROR | stderr | self._wait_for_tstate_lock()
|
683 |
+
2024-06-02 04:40:07 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
|
684 |
+
2024-06-02 04:40:07 | ERROR | stderr | if lock.acquire(block, timeout):
|
685 |
+
2024-06-02 04:40:07 | ERROR | stderr | KeyboardInterrupt
|
686 |
+
2024-06-02 04:40:56 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
687 |
+
2024-06-02 04:40:56 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
688 |
+
2024-06-02 04:40:56 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
689 |
+
2024-06-02 04:40:56 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
690 |
+
2024-06-02 04:40:56 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
691 |
+
2024-06-02 04:40:56 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
692 |
+
2024-06-02 04:40:56 | INFO | stdout |
|
693 |
+
2024-06-02 04:40:56 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
694 |
+
2024-06-02 04:40:56 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
695 |
+
2024-06-02 04:40:56 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
696 |
+
2024-06-02 04:40:56 | INFO | stdout | --------
|
697 |
+
2024-06-02 04:40:57 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
698 |
+
2024-06-02 04:40:57 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
699 |
+
2024-06-02 04:41:16 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
700 |
+
2024-06-02 04:41:16 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
701 |
+
2024-06-02 04:41:16 | ERROR | stderr | warnings.warn(
|
702 |
+
2024-06-02 04:41:19 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
703 |
+
2024-06-02 04:41:19 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
704 |
+
2024-06-02 04:43:19 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch. The other person is running away from him. The man's fist is raised and his arm is extended towards the other person. The other person is wearing a gray shirt and white pants.
|
705 |
+
2024-06-02 04:43:19 | INFO | stdout |
|
706 |
+
2024-06-02 04:43:19 | INFO | stdout | The coordinates for drawing a bounding box around these two people would be: [0.26, 0.31, 0.78, 0.79]
|
707 |
+
2024-06-02 04:43:19 | INFO | stdout | ['0.26', '0.31', '0.78', '0.79']
|
708 |
+
2024-06-02 04:43:19 | ERROR | gradio_web_server | 'Image' object has no attribute 'rectangle'
|
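This error and the earlier `'str' object has no attribute 'rectangle'` share one cause: in Pillow, rectangles are drawn through an `ImageDraw.Draw` wrapper, never on the image object itself (and never on a path string). A minimal sketch, assuming the normalized `[x1, y1, x2, y2]` boxes seen in the log; the input path is a placeholder:

```python
from PIL import Image, ImageDraw

image = Image.open("input.jpg").convert("RGB")   # placeholder path
draw = ImageDraw.Draw(image)                     # Image has no .rectangle; Draw does
w, h = image.size                                # .size is (width, height)
x1, y1, x2, y2 = 0.26, 0.31, 0.78, 0.79          # normalized box from the model reply
draw.rectangle([x1 * w, y1 * h, x2 * w, y2 * h], outline="#ffff33", width=3)
image.save("boxed.jpg")
```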
709 |
+
2024-06-02 04:44:19 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
710 |
+
2024-06-02 04:44:19 | ERROR | stderr | Traceback (most recent call last):
|
711 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
|
712 |
+
2024-06-02 04:44:19 | ERROR | stderr | time.sleep(0.1)
|
713 |
+
2024-06-02 04:44:19 | ERROR | stderr | KeyboardInterrupt
|
714 |
+
2024-06-02 04:44:19 | ERROR | stderr |
|
715 |
+
2024-06-02 04:44:19 | ERROR | stderr | During handling of the above exception, another exception occurred:
|
716 |
+
2024-06-02 04:44:19 | ERROR | stderr |
|
717 |
+
2024-06-02 04:44:19 | ERROR | stderr | Traceback (most recent call last):
|
718 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 403, in <module>
|
719 |
+
2024-06-02 04:44:19 | ERROR | stderr | demo = build_demo(args.embed)
|
720 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
|
721 |
+
2024-06-02 04:44:19 | ERROR | stderr | self.block_thread()
|
722 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
|
723 |
+
2024-06-02 04:44:19 | ERROR | stderr | self.server.close()
|
724 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
|
725 |
+
2024-06-02 04:44:19 | ERROR | stderr | self.thread.join()
|
726 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
|
727 |
+
2024-06-02 04:44:19 | ERROR | stderr | self._wait_for_tstate_lock()
|
728 |
+
2024-06-02 04:44:19 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
|
729 |
+
2024-06-02 04:44:19 | ERROR | stderr | if lock.acquire(block, timeout):
|
730 |
+
2024-06-02 04:44:19 | ERROR | stderr | KeyboardInterrupt
|
731 |
+
2024-06-02 04:45:00 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
732 |
+
2024-06-02 04:45:00 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
733 |
+
2024-06-02 04:45:00 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
734 |
+
2024-06-02 04:45:00 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
735 |
+
2024-06-02 04:45:00 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
736 |
+
2024-06-02 04:45:00 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
737 |
+
2024-06-02 04:45:00 | INFO | stdout |
|
738 |
+
2024-06-02 04:45:00 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
739 |
+
2024-06-02 04:45:01 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
740 |
+
2024-06-02 04:45:01 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
741 |
+
2024-06-02 04:45:01 | INFO | stdout | --------
|
742 |
+
2024-06-02 04:45:01 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
743 |
+
2024-06-02 04:45:01 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
744 |
+
2024-06-02 04:45:04 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
745 |
+
2024-06-02 04:45:04 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
746 |
+
2024-06-02 04:45:04 | ERROR | stderr | warnings.warn(
|
747 |
+
2024-06-02 04:45:05 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
748 |
+
2024-06-02 04:45:05 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
749 |
+
2024-06-02 04:47:35 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch. The other man is attempting to block the punch with his arms raised. The precise locations and actions of these individuals are as follows:
|
750 |
+
2024-06-02 04:47:35 | INFO | stdout |
|
751 |
+
2024-06-02 04:47:35 | INFO | stdout | 1. Man on left (puncher): 0.32, 0.34, 0.59, 0.76
|
752 |
+
2024-06-02 04:47:35 | INFO | stdout | 2. Man on right (blocker): 0.28, 0.32, 0.75, 0.77
|
753 |
+
2024-06-02 04:47:35 | INFO | stdout |
|
754 |
+
2024-06-02 04:47:35 | INFO | stdout | The image does not provide enough information to accurately determine the exact coordinates of these objects or their actions. However, based on the description provided, it seems that both men are engaged in a physical altercation and are likely positioned close together.
|
755 |
+
2024-06-02 04:47:35 | INFO | stdout | ['0.32', '0.34', '0.59', '0.76', '0.28', '0.32', '0.75', '0.77']
|
756 |
+
2024-06-02 04:47:35 | ERROR | gradio_web_server | unknown color specifier: '# ffff33'
|
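`unknown color specifier: '# ffff33'` is Pillow rejecting the stray space inside the hex string; color specifiers are parsed verbatim. Normalizing the string before it reaches `ImageDraw` avoids the crash:

```python
from PIL import ImageColor

color = "# ffff33".replace(" ", "")   # -> "#ffff33"
rgb = ImageColor.getrgb(color)        # (255, 255, 51); raises ValueError if still malformed
```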
757 |
+
2024-06-02 04:48:08 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
758 |
+
2024-06-02 04:48:50 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
759 |
+
2024-06-02 04:48:50 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
760 |
+
2024-06-02 04:48:51 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
761 |
+
2024-06-02 04:48:51 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
762 |
+
2024-06-02 04:48:51 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
763 |
+
2024-06-02 04:48:51 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
764 |
+
2024-06-02 04:48:51 | INFO | stdout |
|
765 |
+
2024-06-02 04:48:51 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
766 |
+
2024-06-02 04:48:51 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
767 |
+
2024-06-02 04:48:51 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
768 |
+
2024-06-02 04:48:51 | INFO | stdout | --------
|
769 |
+
2024-06-02 04:48:51 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
770 |
+
2024-06-02 04:48:52 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
771 |
+
2024-06-02 04:53:21 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
772 |
+
2024-06-02 04:53:21 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
773 |
+
2024-06-02 04:53:21 | ERROR | stderr | warnings.warn(
|
774 |
+
2024-06-02 04:53:22 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
775 |
+
2024-06-02 04:53:22 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
776 |
+
2024-06-02 04:54:58 | INFO | stdout | In this image, there is a man who appears to be in the middle of throwing a punch. The coordinates for drawing a bounding box around him are [0.35, 0.29, 0.76, 0.8] and his action can be described as "throwing a punch".
|
777 |
+
2024-06-02 04:54:58 | INFO | stdout | ['0.35', '0.29', '0.76', '0.8']
|
778 |
+
2024-06-02 04:54:58 | ERROR | gradio_web_server | incorrect coordinate type
|
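`incorrect coordinate type` (which recurs at 05:19, 05:27, and 05:31 below) lines up with the string lists printed just before each occurrence, e.g. `['0.35', '0.29', '0.76', '0.8']`: the box is extracted from the reply as strings, while Pillow wants numbers in pixel space. A sketch of the conversion, under that assumption:

```python
def to_pixel_box(coords, size):
    """coords: normalized strings like ['0.35', '0.29', '0.76', '0.8'];
    size: (width, height) as returned by PIL's Image.size."""
    x1, y1, x2, y2 = (float(c) for c in coords)
    w, h = size
    return [x1 * w, y1 * h, x2 * w, y2 * h]

box = to_pixel_box(['0.35', '0.29', '0.76', '0.8'], (416, 416))  # image size from the log
```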
779 |
+
2024-06-02 05:15:22 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
780 |
+
2024-06-02 05:15:22 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
781 |
+
2024-06-02 05:15:22 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
782 |
+
2024-06-02 05:15:22 | INFO | stdout | Running on local URL: http://127.0.0.1:7861
|
783 |
+
2024-06-02 05:15:22 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7861/startup-events "HTTP/1.1 200 OK"
|
784 |
+
2024-06-02 05:15:22 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 200 OK"
|
785 |
+
2024-06-02 05:15:22 | INFO | stdout |
|
786 |
+
2024-06-02 05:15:22 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
787 |
+
2024-06-02 05:15:23 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
788 |
+
2024-06-02 05:15:23 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
789 |
+
2024-06-02 05:15:23 | INFO | stdout | --------
|
790 |
+
2024-06-02 05:15:23 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
791 |
+
2024-06-02 05:15:23 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
792 |
+
2024-06-02 05:17:08 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
793 |
+
2024-06-02 05:17:28 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
794 |
+
2024-06-02 05:17:28 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
795 |
+
2024-06-02 05:17:28 | ERROR | stderr | warnings.warn(
|
796 |
+
2024-06-02 05:17:30 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
797 |
+
2024-06-02 05:17:31 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
798 |
+
2024-06-02 05:19:29 | INFO | stdout | In the image, there is a man who appears to be in a state of agitation. He is wearing a black shirt and blue jeans. He has his arms outstretched towards another person, who is also dressed in a black shirt and blue jeans. The man's posture suggests that he might be attempting to push or grab the other person.
|
799 |
+
2024-06-02 05:19:29 | INFO | stdout |
|
800 |
+
2024-06-02 05:19:29 | INFO | stdout | The coordinates for drawing a bounding box around this scene are [0.28, 0.31, 0.79, 0.74]
|
801 |
+
2024-06-02 05:19:29 | INFO | stdout | ['0.28', '0.31', '0.79', '0.74']
|
802 |
+
2024-06-02 05:19:29 | ERROR | gradio_web_server | incorrect coordinate type
|
803 |
+
2024-06-02 05:23:24 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
804 |
+
2024-06-02 05:23:24 | ERROR | stderr | Traceback (most recent call last):
|
805 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2400, in block_thread
|
806 |
+
2024-06-02 05:23:24 | ERROR | stderr | time.sleep(0.1)
|
807 |
+
2024-06-02 05:23:24 | ERROR | stderr | KeyboardInterrupt
|
808 |
+
2024-06-02 05:23:24 | ERROR | stderr |
|
809 |
+
2024-06-02 05:23:24 | ERROR | stderr | During handling of the above exception, another exception occurred:
|
810 |
+
2024-06-02 05:23:24 | ERROR | stderr |
|
811 |
+
2024-06-02 05:23:24 | ERROR | stderr | Traceback (most recent call last):
|
812 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "D:\BLIP\gdio\app.py", line 407, in <module>
|
813 |
+
2024-06-02 05:23:24 | ERROR | stderr | demo = build_demo(args.embed)
|
814 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2307, in launch
|
815 |
+
2024-06-02 05:23:24 | ERROR | stderr | self.block_thread()
|
816 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\blocks.py", line 2404, in block_thread
|
817 |
+
2024-06-02 05:23:24 | ERROR | stderr | self.server.close()
|
818 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\http_server.py", line 68, in close
|
819 |
+
2024-06-02 05:23:24 | ERROR | stderr | self.thread.join()
|
820 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1096, in join
|
821 |
+
2024-06-02 05:23:24 | ERROR | stderr | self._wait_for_tstate_lock()
|
822 |
+
2024-06-02 05:23:24 | ERROR | stderr | File "C:\Users\white\.pyenv\pyenv-win\versions\3.10.5\lib\threading.py", line 1116, in _wait_for_tstate_lock
|
823 |
+
2024-06-02 05:23:24 | ERROR | stderr | if lock.acquire(block, timeout):
|
824 |
+
2024-06-02 05:23:24 | ERROR | stderr | KeyboardInterrupt
|
825 |
+
2024-06-02 05:24:08 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
826 |
+
2024-06-02 05:24:08 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
827 |
+
2024-06-02 05:24:09 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
828 |
+
2024-06-02 05:24:09 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
829 |
+
2024-06-02 05:24:09 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
830 |
+
2024-06-02 05:24:09 | INFO | stdout |
|
831 |
+
2024-06-02 05:24:09 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
832 |
+
2024-06-02 05:24:09 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
833 |
+
2024-06-02 05:24:09 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
834 |
+
2024-06-02 05:24:09 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
835 |
+
2024-06-02 05:24:09 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
836 |
+
2024-06-02 05:24:09 | INFO | stdout | --------
|
837 |
+
2024-06-02 05:24:10 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
838 |
+
2024-06-02 05:24:10 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
839 |
+
2024-06-02 05:24:16 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
840 |
+
2024-06-02 05:24:16 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
841 |
+
2024-06-02 05:24:16 | ERROR | stderr | warnings.warn(
|
842 |
+
2024-06-02 05:24:18 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
843 |
+
2024-06-02 05:24:18 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
844 |
+
2024-06-02 05:27:35 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch. The other man is holding his hands up in front of him, possibly trying to block the incoming attack. The exact coordinates for drawing a bounding box would depend on the specific details of the image and the positioning of the objects within it. However, based on the description provided, one could estimate that the puncher's hands are located at (0.32, 0.49, 0.41, 0.68) and the blocker's hands are at (0.57, 0.49, 0.65, 0.57). The puncher is likely located at (0.32, 0.3, 0.72, 0.78), while the blocker could be positioned between (0.21, 0.34, 0.49, 0.76) and (0.57, 0.49, 0.75, 0.78).
|
845 |
+
2024-06-02 05:27:35 | INFO | stdout | ['0.32', '0.49', '0.41', '0.68', '0.57', '0.49', '0.65', '0.57', '0.32', '0.3', '0.72', '0.78', '0.21', '0.34', '0.49', '0.76', '0.57', '0.49', '0.75', '0.78']
|
846 |
+
2024-06-02 05:27:35 | ERROR | gradio_web_server | incorrect coordinate type
|
847 |
+
2024-06-02 05:28:03 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
848 |
+
2024-06-02 05:28:49 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
849 |
+
2024-06-02 05:28:49 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
850 |
+
2024-06-02 05:28:50 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
851 |
+
2024-06-02 05:28:50 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
852 |
+
2024-06-02 05:28:50 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
853 |
+
2024-06-02 05:28:50 | INFO | stdout |
|
854 |
+
2024-06-02 05:28:50 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
855 |
+
2024-06-02 05:28:50 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
856 |
+
2024-06-02 05:28:50 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
857 |
+
2024-06-02 05:28:50 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
858 |
+
2024-06-02 05:28:50 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
859 |
+
2024-06-02 05:28:50 | INFO | stdout | --------
|
860 |
+
2024-06-02 05:28:51 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
861 |
+
2024-06-02 05:28:51 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
862 |
+
2024-06-02 05:28:54 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
863 |
+
2024-06-02 05:28:54 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
864 |
+
2024-06-02 05:28:54 | ERROR | stderr | warnings.warn(
|
865 |
+
2024-06-02 05:28:55 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
866 |
+
2024-06-02 05:28:56 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
867 |
+
2024-06-02 05:31:15 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch. The other man is holding his arms up in an attempt to block the incoming attack. The exact coordinates for drawing a bounding box would depend on the specific details of the image and the objects within it. However, based on the description provided, we can say that the man throwing the punch is located at (0.36, 0.29, 0.71, 0.8) and the man blocking the punch is positioned at (0.24, 0.35, 0.78, 0.77).
|
868 |
+
2024-06-02 05:31:15 | INFO | stdout | ['0.36', '0.29', '0.71', '0.8', '0.24', '0.35', '0.78', '0.77']
|
869 |
+
2024-06-02 05:31:15 | ERROR | gradio_web_server | incorrect coordinate type
|
870 |
+
2024-06-02 05:33:01 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
871 |
+
2024-06-02 05:34:02 | INFO | gradio_web_server | args: Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
872 |
+
2024-06-02 05:34:02 | INFO | gradio_web_server | Namespace(concurrency_count=16, model_list_mode='reload', share=False, moderate=False, embed=False)
|
873 |
+
2024-06-02 05:34:02 | INFO | httpx | HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
874 |
+
2024-06-02 05:34:02 | INFO | stdout | Running on local URL: http://127.0.0.1:7860
|
875 |
+
2024-06-02 05:34:02 | INFO | httpx | HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
876 |
+
2024-06-02 05:34:03 | INFO | httpx | HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
877 |
+
2024-06-02 05:34:03 | INFO | stdout |
|
878 |
+
2024-06-02 05:34:03 | INFO | stdout | To create a public link, set `share=True` in `launch()`.
|
879 |
+
2024-06-02 05:34:03 | INFO | httpx | HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
880 |
+
2024-06-02 05:34:03 | INFO | stdout | IMPORTANT: You are using gradio version 4.24.0, however version 4.29.0 is available, please upgrade.
|
881 |
+
2024-06-02 05:34:03 | INFO | stdout | --------
|
882 |
+
2024-06-02 05:34:03 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-initiated-analytics/ "HTTP/1.1 200 OK"
|
883 |
+
2024-06-02 05:34:03 | INFO | httpx | HTTP Request: POST https://api.gradio.app/gradio-launched-telemetry/ "HTTP/1.1 200 OK"
|
884 |
+
2024-06-02 05:34:15 | INFO | gradio_web_server | load_demo. ip: 127.0.0.1
|
885 |
+
2024-06-02 05:34:15 | ERROR | stderr | D:\IEEESurvive\gdio\.wenv\lib\site-packages\gradio\components\dropdown.py:179: UserWarning: The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: or set allow_custom_value=True.
|
886 |
+
2024-06-02 05:34:15 | ERROR | stderr | warnings.warn(
|
887 |
+
2024-06-02 05:34:15 | INFO | gradio_web_server | add_text. ip: 127.0.0.1. len: 99
|
888 |
+
2024-06-02 05:34:16 | INFO | gradio_web_server | http_bot. ip: 127.0.0.1
|
889 |
+
2024-06-02 05:37:55 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch towards another person. The other person is attempting to block the incoming attack with their arms raised. The precise locations of these actions are as follows:
|
890 |
+
2024-06-02 05:37:55 | INFO | stdout |
|
891 |
+
2024-06-02 05:37:55 | INFO | stdout | 1. The man throwing the punch is located at (0.34, 0.29, 0.65, 0.78) and has a bounding box of [0.34, 0.29, 0.65, 0.78]
|
892 |
+
2024-06-02 05:37:55 | INFO | stdout | 2. The person being punched is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]
|
893 |
+
2024-06-02 05:37:55 | INFO | stdout | 3. The person blocking the punch is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]
|
894 |
+
2024-06-02 05:37:55 | INFO | stdout |
|
895 |
+
2024-06-02 05:37:55 | INFO | stdout | Please note that these coordinates are based on the image description provided and may not be entirely accurate due to perspective or other factors.
|
896 |
+
2024-06-02 05:37:55 | INFO | stdout | ['0.34', '0.29', '0.65', '0.78', '0.34', '0.29', '0.65', '0.78', '0.21', '0.33', '0.45', '0.76', '0.21', '0.33', '0.45', '0.76', '0.21', '0.33', '0.45', '0.76', '0.21', '0.33', '0.45', '0.76']
|
897 |
+
2024-06-02 05:37:55 | INFO | stdout | In the image, there is a man who appears to be in the middle of throwing a punch towards another person. The other person is attempting to block the incoming attack with their arms raised. The precise locations of these actions are as follows:
|
898 |
+
2024-06-02 05:37:55 | INFO | stdout |
|
899 |
+
2024-06-02 05:37:55 | INFO | stdout | 1. The man throwing the punch is located at (0.34, 0.29, 0.65, 0.78) and has a bounding box of [0.34, 0.29, 0.65, 0.78]
|
900 |
+
2024-06-02 05:37:55 | INFO | stdout | 2. The person being punched is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]
|
901 |
+
2024-06-02 05:37:55 | INFO | stdout | 3. The person blocking the punch is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]
|
902 |
+
2024-06-02 05:37:55 | INFO | stdout |
|
903 |
+
2024-06-02 05:37:55 | INFO | stdout | Please note that these coordinates are based on the image description provided and may not be entirely accurate due to perspective or other factors. None
|
904 |
+
2024-06-02 05:37:55 | INFO | stdout | (' In the image, there is a man who appears to be in the middle of throwing a punch towards another person. The other person is attempting to block the incoming attack with their arms raised. The precise locations of these actions are as follows:\n\n1. The man throwing the punch is located at (0.34, 0.29, 0.65, 0.78) and has a bounding box of [0.34, 0.29, 0.65, 0.78]\n2. The person being punched is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]\n3. The person blocking the punch is located at (0.21, 0.33, 0.45, 0.76) and has a bounding box of [0.21, 0.33, 0.45, 0.76]\n\nPlease note that these coordinates are based on the image description provided and may not be entirely accurate due to perspective or other factors.\n<image>', <PIL.Image.Image image mode=RGB size=416x416 at 0x205F04DFCD0>, 'Default') None
|
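The tuple dumped above — prompt text ending in the `<image>` token, a 416x416 `PIL.Image`, and the `'Default'` process mode — has the shape that `add_text` in `test.py` below builds before appending to the conversation state; the trailing `None` appears to be a separately printed return value. A small illustrative unpacking, with placeholder values:

```python
from PIL import Image

# Shape of the logged message tuple: (str ending in "\n<image>", PIL image, process mode).
message = ("Describe this image.\n<image>", Image.new("RGB", (416, 416)), "Default")
prompt_text, pil_image, process_mode = message
```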
905 |
+
2024-06-02 05:40:35 | INFO | stdout | Keyboard interruption in main thread... closing server.
|
mmproj-model-f16.gguf
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:eddb6117944f526454c8b35e9d9209a7ee99f6db005f6dd625debb514222795f
|
3 |
+
size 624434336
|
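The three lines above are a Git LFS pointer, not the model weights themselves: checkouts without LFS only get this `oid`/`size` stub. If LFS is unavailable, `huggingface_hub` (pinned in `requirements.txt` below) can pull the real file; the repo id here is a placeholder, since the Space's full name is not part of this diff:

```python
from huggingface_hub import hf_hub_download

# repo_id is hypothetical -- substitute the actual "<owner>/gdio" Space name.
path = hf_hub_download(repo_id="owner/gdio", filename="mmproj-model-f16.gguf",
                       repo_type="space")
```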
requirements.txt
ADDED
@@ -0,0 +1,165 @@
1 |
+
accelerate==0.21.0
|
2 |
+
aiofiles==23.2.1
|
3 |
+
aiohttp==3.9.3
|
4 |
+
aiosignal==1.3.1
|
5 |
+
altair==5.3.0
|
6 |
+
annotated-types==0.6.0
|
7 |
+
anyio==4.3.0
|
8 |
+
astor==0.8.1
|
9 |
+
async-timeout==4.0.3
|
10 |
+
attrdict==2.0.1
|
11 |
+
attrs==23.2.0
|
12 |
+
Babel==2.14.0
|
13 |
+
bce-python-sdk==0.9.6
|
14 |
+
beautifulsoup4==4.12.3
|
15 |
+
bitsandbytes==0.41.0
|
16 |
+
blinker==1.7.0
|
17 |
+
cachetools==5.3.3
|
18 |
+
certifi==2024.2.2
|
19 |
+
charset-normalizer==3.3.2
|
20 |
+
click==8.1.7
|
21 |
+
cmake==3.29.0.1
|
22 |
+
contourpy==1.2.0
|
23 |
+
cssselect==1.2.0
|
24 |
+
cssutils==2.10.2
|
25 |
+
cycler==0.12.1
|
26 |
+
Cython==3.0.10
|
27 |
+
decorator==5.1.1
|
28 |
+
# deepspeed==0.14.0
|
29 |
+
diffusers==0.26.3
|
30 |
+
distro==1.9.0
|
31 |
+
einops==0.6.1
|
32 |
+
einops-exts==0.0.4
|
33 |
+
et-xmlfile==1.1.0
|
34 |
+
exceptiongroup==1.2.0
|
35 |
+
fastapi==0.110.0
|
36 |
+
ffmpy==0.3.2
|
37 |
+
filelock==3.13.3
|
38 |
+
fire==0.6.0
|
39 |
+
# flash-attn==2.5.6
|
40 |
+
Flask==3.0.2
|
41 |
+
flask-babel==4.0.0
|
42 |
+
fonttools==4.50.0
|
43 |
+
frozenlist==1.4.1
|
44 |
+
fsspec==2024.3.1
|
45 |
+
ftfy==6.2.0
|
46 |
+
future==1.0.0
|
47 |
+
gradio==4.24.0
|
48 |
+
gradio_client==0.14.0
|
49 |
+
h11==0.14.0
|
50 |
+
hjson==3.1.0
|
51 |
+
httpcore==1.0.5
|
52 |
+
httpx==0.27.0
|
53 |
+
huggingface-hub==0.22.2
|
54 |
+
idna==3.6
|
55 |
+
imageio==2.34.0
|
56 |
+
imgaug==0.4.0
|
57 |
+
importlib_metadata==7.1.0
|
58 |
+
importlib_resources==6.4.0
|
59 |
+
itsdangerous==2.1.2
|
60 |
+
Jinja2==3.1.3
|
61 |
+
joblib==1.3.2
|
62 |
+
jsonschema==4.21.1
|
63 |
+
jsonschema-specifications==2023.12.1
|
64 |
+
kiwisolver==1.4.5
|
65 |
+
lazy_loader==0.3
|
66 |
+
linkify-it-py==2.0.3
|
67 |
+
lit==18.1.2
|
68 |
+
lmdb==1.4.1
|
69 |
+
lxml==5.2.0
|
70 |
+
markdown-it-py==2.2.0
|
71 |
+
markdown2==2.4.13
|
72 |
+
MarkupSafe==2.1.5
|
73 |
+
matplotlib==3.8.3
|
74 |
+
mdit-py-plugins==0.3.3
|
75 |
+
mdurl==0.1.2
|
76 |
+
mpmath==1.3.0
|
77 |
+
multidict==6.0.5
|
78 |
+
networkx==3.2.1
|
79 |
+
ninja==1.11.1.1
|
80 |
+
numpy==1.26.4
|
81 |
+
open-clip-torch==2.24.0
|
82 |
+
openai==1.16.0
|
83 |
+
opencv-contrib-python==4.6.0.66
|
84 |
+
opencv-python==4.6.0.66
|
85 |
+
opencv-python-headless==4.9.0.80
|
86 |
+
openpyxl==3.1.2
|
87 |
+
opt-einsum==3.3.0
|
88 |
+
orjson==3.10.0
|
89 |
+
packaging==24.0
|
90 |
+
# paddleocr==2.7.0.3
|
91 |
+
# paddlepaddle==2.5.2
|
92 |
+
pandas==2.2.1
|
93 |
+
pdf2docx==0.5.8
|
94 |
+
peft==0.4.0
|
95 |
+
pillow==10.3.0
|
96 |
+
premailer==3.10.0
|
97 |
+
protobuf==5.26.1
|
98 |
+
psutil==5.9.8
|
99 |
+
py-cpuinfo==9.0.0
|
100 |
+
pyclipper==1.3.0.post5
|
101 |
+
pycryptodome==3.20.0
|
102 |
+
pydantic==2.6.4
|
103 |
+
pydantic_core==2.16.3
|
104 |
+
pydub==0.25.1
|
105 |
+
Pygments==2.17.2
|
106 |
+
PyMuPDF==1.20.2
|
107 |
+
PyMuPDFb==1.24.0
|
108 |
+
pynvml==11.5.0
|
109 |
+
pyparsing==3.1.2
|
110 |
+
python-dateutil==2.9.0.post0
|
111 |
+
python-docx==1.1.0
|
112 |
+
python-multipart==0.0.9
|
113 |
+
pytz==2024.1
|
114 |
+
PyYAML==6.0.1
|
115 |
+
rapidfuzz==3.7.0
|
116 |
+
rarfile==4.1
|
117 |
+
referencing==0.34.0
|
118 |
+
regex==2023.12.25
|
119 |
+
requests==2.31.0
|
120 |
+
rich==13.7.1
|
121 |
+
rpds-py==0.18.0
|
122 |
+
ruff==0.3.5
|
123 |
+
safetensors==0.4.2
|
124 |
+
scikit-image==0.22.0
|
125 |
+
scikit-learn==1.2.2
|
126 |
+
scipy==1.12.0
|
127 |
+
semantic-version==2.10.0
|
128 |
+
sentencepiece==0.1.99
|
129 |
+
shapely==2.0.3
|
130 |
+
shellingham==1.5.4
|
131 |
+
shortuuid==1.0.13
|
132 |
+
six==1.16.0
|
133 |
+
sniffio==1.3.1
|
134 |
+
soupsieve==2.5
|
135 |
+
starlette==0.36.3
|
136 |
+
svgwrite==1.4.3
|
137 |
+
sympy==1.12
|
138 |
+
termcolor==2.4.0
|
139 |
+
threadpoolctl==3.4.0
|
140 |
+
tifffile==2024.2.12
|
141 |
+
timm==0.9.16
|
142 |
+
tokenizers==0.15.0
|
143 |
+
tomlkit==0.12.0
|
144 |
+
toolz==0.12.1
|
145 |
+
torch==2.0.1
|
146 |
+
torchvision==0.15.2
|
147 |
+
tqdm==4.66.2
|
148 |
+
# transformers==4.36.2
|
149 |
+
# triton==2.0.0
|
150 |
+
typer==0.12.0
|
151 |
+
typer-cli==0.12.0
|
152 |
+
typer-slim==0.12.0
|
153 |
+
typing_extensions==4.10.0
|
154 |
+
tzdata==2024.1
|
155 |
+
uc-micro-py==1.0.3
|
156 |
+
urllib3==2.2.1
|
157 |
+
uvicorn==0.29.0
|
158 |
+
visualdl==2.5.3
|
159 |
+
wavedrom==2.0.3.post3
|
160 |
+
wcwidth==0.2.13
|
161 |
+
websockets==11.0.3
|
162 |
+
Werkzeug==3.0.2
|
163 |
+
yarl==1.9.4
|
164 |
+
zipp==3.18.1
|
165 |
+
llama-cpp-python==0.2.59
|
test.py
ADDED
@@ -0,0 +1,479 @@
1 |
+
import argparse
|
2 |
+
import datetime
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
import time
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
import requests
|
9 |
+
|
10 |
+
from minigemini.conversation import (default_conversation, conv_templates,
|
11 |
+
SeparatorStyle)
|
12 |
+
from minigemini.constants import LOGDIR
|
13 |
+
from minigemini.utils import (build_logger, server_error_msg,
|
14 |
+
violates_moderation, moderation_msg)
|
15 |
+
import hashlib
|
16 |
+
|
17 |
+
|
18 |
+
logger = build_logger("gradio_web_server", "gradio_web_server.log")
|
19 |
+
|
20 |
+
headers = {"User-Agent": "Mini-Gemini Client"}
|
21 |
+
|
22 |
+
no_change_btn = gr.Button()
|
23 |
+
enable_btn = gr.Button(interactive=True)
|
24 |
+
disable_btn = gr.Button(interactive=False)
|
25 |
+
|
26 |
+
priority = {
|
27 |
+
"vicuna-13b": "aaaaaaa",
|
28 |
+
"koala-13b": "aaaaaab",
|
29 |
+
}
|
30 |
+
|
31 |
+
|
32 |
+
def get_conv_log_filename():
|
33 |
+
t = datetime.datetime.now()
|
34 |
+
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
|
35 |
+
return name
|
36 |
+
|
37 |
+
|
38 |
+
def get_model_list():
|
39 |
+
ret = requests.post(args.controller_url + "/refresh_all_workers")
|
40 |
+
assert ret.status_code == 200
|
41 |
+
ret = requests.post(args.controller_url + "/list_models")
|
42 |
+
models = ret.json()["models"]
|
43 |
+
models.sort(key=lambda x: priority.get(x, x))
|
44 |
+
logger.info(f"Models: {models}")
|
45 |
+
return models
|
46 |
+
|
47 |
+
|
48 |
+
get_window_url_params = """
|
49 |
+
function() {
|
50 |
+
const params = new URLSearchParams(window.location.search);
|
51 |
+
const url_params = Object.fromEntries(params);
|
52 |
+
console.log(url_params);
|
53 |
+
return url_params;
|
54 |
+
}
|
55 |
+
"""
|
56 |
+
|
57 |
+
|
58 |
+
def load_demo(url_params, request: gr.Request):
|
59 |
+
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
|
60 |
+
|
61 |
+
dropdown_update = gr.Dropdown(visible=True)
|
62 |
+
if "model" in url_params:
|
63 |
+
model = url_params["model"]
|
64 |
+
if model in models:
|
65 |
+
dropdown_update = gr.Dropdown(value=model, visible=True)
|
66 |
+
|
67 |
+
state = default_conversation.copy()
|
68 |
+
return state, dropdown_update
|
69 |
+
|
70 |
+
|
71 |
+
def load_demo_refresh_model_list(request: gr.Request):
|
72 |
+
logger.info(f"load_demo. ip: {request.client.host}")
|
73 |
+
models = get_model_list()
|
74 |
+
state = default_conversation.copy()
|
75 |
+
dropdown_update = gr.Dropdown(
|
76 |
+
choices=models,
|
77 |
+
value=models[0] if len(models) > 0 else ""
|
78 |
+
)
|
79 |
+
return state, dropdown_update
|
80 |
+
|
81 |
+
|
82 |
+
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
|
83 |
+
with open(get_conv_log_filename(), "a") as fout:
|
84 |
+
data = {
|
85 |
+
"tstamp": round(time.time(), 4),
|
86 |
+
"type": vote_type,
|
87 |
+
"model": model_selector,
|
88 |
+
"state": state.dict(),
|
89 |
+
"ip": request.client.host,
|
90 |
+
}
|
91 |
+
fout.write(json.dumps(data) + "\n")
|
92 |
+
|
93 |
+
|
94 |
+
def upvote_last_response(state, model_selector, request: gr.Request):
|
95 |
+
logger.info(f"upvote. ip: {request.client.host}")
|
96 |
+
vote_last_response(state, "upvote", model_selector, request)
|
97 |
+
return ("",) + (disable_btn,) * 3
|
98 |
+
|
99 |
+
|
100 |
+
def downvote_last_response(state, model_selector, request: gr.Request):
|
101 |
+
logger.info(f"downvote. ip: {request.client.host}")
|
102 |
+
vote_last_response(state, "downvote", model_selector, request)
|
103 |
+
return ("",) + (disable_btn,) * 3
|
104 |
+
|
105 |
+
|
106 |
+
def flag_last_response(state, model_selector, request: gr.Request):
|
107 |
+
logger.info(f"flag. ip: {request.client.host}")
|
108 |
+
vote_last_response(state, "flag", model_selector, request)
|
109 |
+
return ("",) + (disable_btn,) * 3
|
110 |
+
|
111 |
+
|
112 |
+
def regenerate(state, image_process_mode, request: gr.Request):
|
113 |
+
logger.info(f"regenerate. ip: {request.client.host}")
|
114 |
+
state.messages[-1][-1] = None
|
115 |
+
prev_human_msg = state.messages[-2]
|
116 |
+
if type(prev_human_msg[1]) in (tuple, list):
|
117 |
+
prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
|
118 |
+
state.skip_next = False
|
119 |
+
return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
|
120 |
+
|
121 |
+
|
122 |
+
def clear_history(request: gr.Request):
|
123 |
+
logger.info(f"clear_history. ip: {request.client.host}")
|
124 |
+
state = default_conversation.copy()
|
125 |
+
return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
|
126 |
+
|
127 |
+
|
128 |
+
def add_text(state, text, image, image_process_mode, request: gr.Request):
|
129 |
+
logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
|
130 |
+
if len(text) <= 0 and image is None:
|
131 |
+
state.skip_next = True
|
132 |
+
return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
|
133 |
+
if args.moderate:
|
134 |
+
flagged = violates_moderation(text)
|
135 |
+
if flagged:
|
136 |
+
state.skip_next = True
|
137 |
+
return (state, state.to_gradio_chatbot(), moderation_msg, None) + (
|
138 |
+
no_change_btn,) * 5
|
139 |
+
|
140 |
+
text = text[:1536] # Hard cut-off
|
141 |
+
if image is not None:
|
142 |
+
text = text[:1200] # Hard cut-off for images
|
143 |
+
if '<image>' not in text:
|
144 |
+
# text = '<Image><image></Image>' + text
|
145 |
+
text = text + '\n<image>'
|
146 |
+
text = (text, image, image_process_mode)
|
147 |
+
if len(state.get_images(return_pil=True)) > 0:
|
148 |
+
state = default_conversation.copy()
|
149 |
+
state.append_message(state.roles[0], text)
|
150 |
+
state.append_message(state.roles[1], None)
|
151 |
+
state.skip_next = False
|
152 |
+
return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
|
153 |
+
|
154 |
+
|
155 |
+
def http_bot(state, model_selector, temperature, top_p, max_new_tokens, gen_image, use_ocr, request: gr.Request):
|
156 |
+
logger.info(f"http_bot. ip: {request.client.host}")
|
157 |
+
start_tstamp = time.time()
|
158 |
+
model_name = model_selector
|
159 |
+
|
160 |
+
if state.skip_next:
|
161 |
+
# This generate call is skipped due to invalid inputs
|
162 |
+
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
|
163 |
+
return
|
164 |
+
|
165 |
+
if len(state.messages) == state.offset + 2:
|
166 |
+
# First round of conversation
|
167 |
+
if "mini-gemini" in model_name.lower():
|
168 |
+
if '8x7b' in model_name.lower():
|
169 |
+
template_name = "mistral_instruct"
|
170 |
+
elif '34b' in model_name.lower():
|
171 |
+
template_name = "chatml_direct"
|
172 |
+
elif '2b' in model_name.lower():
|
173 |
+
template_name = "gemma"
|
174 |
+
else:
|
175 |
+
template_name = "vicuna_v1"
|
176 |
+
else:
|
177 |
+
template_name = "vicuna_v1"
|
178 |
+
|
179 |
+
new_state = conv_templates[template_name].copy()
|
180 |
+
new_state.append_message(new_state.roles[0], state.messages[-2][1])
|
181 |
+
new_state.append_message(new_state.roles[1], None)
|
182 |
+
state = new_state
|
183 |
+
|
184 |
+
# Query worker address
|
185 |
+
controller_url = args.controller_url
|
186 |
+
ret = requests.post(controller_url + "/get_worker_address",
|
187 |
+
json={"model": model_name})
|
188 |
+
worker_addr = ret.json()["address"]
|
189 |
+
logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
|
190 |
+
|
191 |
+
# No available worker
|
192 |
+
if worker_addr == "":
|
193 |
+
state.messages[-1][-1] = server_error_msg
|
194 |
+
yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
|
195 |
+
return
|
196 |
+
|
197 |
+
# Construct prompt
|
198 |
+
prompt = state.get_prompt()
|
199 |
+
|
200 |
+
all_images = state.get_images(return_pil=True)
|
201 |
+
all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
|
202 |
+
for image, hash in zip(all_images, all_image_hash):
|
203 |
+
t = datetime.datetime.now()
|
204 |
+
filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
|
205 |
+
if not os.path.isfile(filename):
|
206 |
+
os.makedirs(os.path.dirname(filename), exist_ok=True)
|
207 |
+
image.save(filename)
|
208 |
+
|
209 |
+
# Generate Image
|
210 |
+
if 'generate' in prompt.lower():
|
211 |
+
gen_image = 'Yes'
|
212 |
+
elif 'show me one idea of what i could make with this?' in prompt.lower() and len(all_images) == 1:
|
213 |
+
w, h = all_images[0].size  # PIL's .size is (width, height)
|
214 |
+
if w == 922 and h == 672:
|
215 |
+
gen_image = 'Yes'
|
216 |
+
|
217 |
+
# Make requests
|
218 |
+
pload = {
|
219 |
+
"model": model_name,
|
220 |
+
"prompt": prompt,
|
221 |
+
"temperature": float(temperature),
|
222 |
+
"top_p": float(top_p),
|
223 |
+
"max_new_tokens": min(int(max_new_tokens), 1536),
|
224 |
+
"stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
|
225 |
+
"images": f'List of {len(state.get_images())} images: {all_image_hash}',
|
226 |
+
"gen_image": bool(gen_image == 'Yes'),
|
227 |
+
"use_ocr": bool(use_ocr == 'Yes'),
|
228 |
+
}
|
229 |
+
logger.info(f"==== request ====\n{pload}")
|
230 |
+
|
231 |
+
pload['images'] = state.get_images()
|
232 |
+
|
233 |
+
state.messages[-1][-1] = "▌"
|
234 |
+
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
|
235 |
+
|
236 |
+
try:
|
237 |
+
# Stream output
|
238 |
+
response = requests.post(worker_addr + "/worker_generate_stream",
|
239 |
+
headers=headers, json=pload, stream=True, timeout=30)
|
240 |
+
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
|
241 |
+
if chunk:
|
242 |
+
data = json.loads(chunk.decode())
|
243 |
+
if data["error_code"] == 0:
|
244 |
+
if 'image' not in data.keys():
|
245 |
+
output = data["text"][len(prompt):].strip()
|
246 |
+
state.messages[-1][-1] = output + "▌"
|
247 |
+
else:
|
248 |
+
output = (data["text"][len(prompt):].strip(), data["image"])
|
249 |
+
state.messages[-1][-1] = output
|
250 |
+
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
|
251 |
+
else:
|
252 |
+
output = data["text"] + f" (error_code: {data['error_code']})"
|
253 |
+
state.messages[-1][-1] = output
|
254 |
+
yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
|
255 |
+
return
|
256 |
+
time.sleep(0.03)
|
257 |
+
except requests.exceptions.RequestException as e:
|
258 |
+
state.messages[-1][-1] = server_error_msg
|
259 |
+
yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
|
260 |
+
return
|
261 |
+
|
262 |
+
if type(state.messages[-1][-1]) is not tuple:
|
263 |
+
state.messages[-1][-1] = state.messages[-1][-1][:-1]
|
264 |
+
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
|
265 |
+
|
266 |
+
finish_tstamp = time.time()
|
267 |
+
logger.info(f"{output}")
|
268 |
+
|
269 |
+
with open(get_conv_log_filename(), "a") as fout:
|
270 |
+
data = {
|
271 |
+
"tstamp": round(finish_tstamp, 4),
|
272 |
+
"type": "chat",
|
273 |
+
"model": model_name,
|
274 |
+
"start": round(start_tstamp, 4),
|
275 |
+
"finish": round(finish_tstamp, 4),
|
276 |
+
"state": state.dict(),
|
277 |
+
"images": all_image_hash,
|
278 |
+
"ip": request.client.host,
|
279 |
+
}
|
280 |
+
fout.write(json.dumps(data) + "\n")
|
281 |
+
|
282 |
+
title_markdown = ("""
|
283 |
+
# Mini-Gemini: Mining the Potential of Multi-modality Vision Language Models
|
284 |
+
""")
|
285 |
+
|
286 |
+
tos_markdown = ("""
|
287 |
+
### Terms of use
|
288 |
+
By using this service, users are required to agree to the following terms:
|
289 |
+
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
|
290 |
+
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
|
291 |
+
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
|
292 |
+
""")
|
293 |
+
|
294 |
+
|
295 |
+
learn_more_markdown = ("""
|
296 |
+
### License
|
297 |
+
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
|
298 |
+
""")
|
299 |
+
|
300 |
+
block_css = """
|
301 |
+
|
302 |
+
#buttons button {
|
303 |
+
min-width: min(120px,100%);
|
304 |
+
}
|
305 |
+
|
306 |
+
"""
|
307 |
+
|
def build_demo(embed_mode, cur_dir=None, concurrency_count=10):
    textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
    with gr.Blocks(title="Mini-Gemini", theme=gr.themes.Default(), css=block_css) as demo:
        state = gr.State()

        if not embed_mode:
            gr.Markdown(title_markdown)

        with gr.Row():
            with gr.Column(scale=3):
                with gr.Row(elem_id="model_selector_row"):
                    model_selector = gr.Dropdown(
                        choices=models,
                        value=models[0] if len(models) > 0 else "",
                        interactive=True,
                        show_label=False,
                        container=False)

                imagebox = gr.Image(type="pil")
                image_process_mode = gr.Radio(
                    ["Crop", "Resize", "Pad", "Default"],
                    value="Default",
                    label="Preprocess for non-square image", visible=False)

                if cur_dir is None:
                    cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(examples=[
                    [f"{cur_dir}/examples/monday.jpg", "Explain why this meme is funny, and generate a picture of the weekend coming."],
                    [f"{cur_dir}/examples/woolen.png", "Show me one idea of what I could make with this."],
                    [f"{cur_dir}/examples/extreme_ironing.jpg", "What is unusual about this image?"],
                    [f"{cur_dir}/examples/waterview.jpg", "What are the things I should be cautious about when I visit here?"],
                ], inputs=[imagebox, textbox])

                with gr.Accordion("Function", open=True) as function_row:  # distinct name so the "Parameters" accordion below is not shadowed
                    gen_image = gr.Radio(choices=['Yes', 'No'], value='No', interactive=True, label="Generate Image")
                    use_ocr = gr.Radio(choices=['Yes', 'No'], value='Yes', interactive=True, label="Use OCR")

                with gr.Accordion("Parameters", open=False) as parameter_row:
                    temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
                    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
                    max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)

            with gr.Column(scale=7):
                chatbot = gr.Chatbot(
                    elem_id="chatbot",
                    label="Mini-Gemini Chatbot",
                    height=940,
                    layout="panel",
                )
                with gr.Row():
                    with gr.Column(scale=7):
                        textbox.render()
                    with gr.Column(scale=1, min_width=50):
                        submit_btn = gr.Button(value="Send", variant="primary")
                with gr.Row(elem_id="buttons") as button_row:
                    upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
                    downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
                    flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
                    # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
                    regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
                    clear_btn = gr.Button(value="🗑️ Clear", interactive=False)

        if not embed_mode:
            gr.Markdown(function_markdown)
            gr.Markdown(tos_markdown)
            gr.Markdown(learn_more_markdown)
        url_params = gr.JSON(visible=False)

        # Register listeners
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        upvote_btn.click(
            upvote_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn]
        )
        downvote_btn.click(
            downvote_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn]
        )
        flag_btn.click(
            flag_last_response,
            [state, model_selector],
            [textbox, upvote_btn, downvote_btn, flag_btn]
        )

        regenerate_btn.click(
            regenerate,
            [state, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list
        ).then(
            http_bot,
            [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, chatbot] + btn_list,
            concurrency_limit=concurrency_count
        )

        clear_btn.click(
            clear_history,
            None,
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        )

        textbox.submit(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list,
            queue=False
        ).then(
            http_bot,
            [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, chatbot] + btn_list,
            concurrency_limit=concurrency_count
        )

        submit_btn.click(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox] + btn_list
        ).then(
            http_bot,
            [state, model_selector, temperature, top_p, max_output_tokens, gen_image, use_ocr],
            [state, chatbot] + btn_list,
            concurrency_limit=concurrency_count
        )

        if args.model_list_mode == "once":
            demo.load(
                load_demo,
                [url_params],
                [state, model_selector],
                js=get_window_url_params  # Gradio 4.x uses `js`; `_js` is the pre-4 name
            )
        elif args.model_list_mode == "reload":
            demo.load(
                load_demo_refresh_model_list,
                None,
                [state, model_selector],
                queue=False
            )
        else:
            raise ValueError(f"Unknown model list mode: {args.model_list_mode}")

    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--port", type=int)
    parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
    parser.add_argument("--concurrency-count", type=int, default=16)
    parser.add_argument("--model-list-mode", type=str, default="once",
                        choices=["once", "reload"])
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--moderate", action="store_true")
    parser.add_argument("--embed", action="store_true")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    models = get_model_list()

    logger.info(args)
    demo = build_demo(args.embed, concurrency_count=args.concurrency_count)
    demo.queue(
        api_open=False
    ).launch(
        server_name=args.host,
        server_port=args.port,
        share=args.share
    )
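# Usage sketch (illustrative; assumes a model worker is already registered with the
# controller reachable at --controller-url):
#   python app.py --controller-url http://localhost:21001 --model-list-mode reload
#   python app.py --port 7860 --share    # also creates a public Gradio link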
utils.py
ADDED
@@ -0,0 +1,126 @@
import datetime
import logging
import logging.handlers
import os
import sys

import requests

from constants import LOGDIR

server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."

handler = None


def build_logger(logger_name, logger_filename):
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Add a file handler for all loggers
    if handler is None:
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(
            filename, when='D', utc=True, encoding='UTF-8')
        handler.setFormatter(formatter)

        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)

    return logger


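# Usage sketch (illustrative): build_logger is typically called once per process, e.g.
#   logger = build_logger("gradio_web_server", "gradio_web_server.log")
# after which ordinary print() output and uncaught tracebacks are also captured into
# the daily-rotating file under LOGDIR, via the StreamToLogger redirection below.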
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def __getattr__(self, attr):
        return getattr(self.terminal, attr)

    def write(self, buf):
        temp_linebuf = self.linebuf + buf
        self.linebuf = ''
        for line in temp_linebuf.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' characters written
            #   are translated to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines and then
            # translates them, so this is still cross-platform.
            if line[-1] == '\n':
                self.logger.log(self.log_level, line.rstrip())
            else:
                self.linebuf += line

    def flush(self):
        if self.linebuf != '':
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ''


def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch
    setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
    setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)

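# Usage sketch (illustrative; the loader below is hypothetical): call this right before
# instantiating a model whose weights will be overwritten by a checkpoint anyway, e.g.
#   disable_torch_init()
#   model = SomePretrainedModel.from_pretrained(ckpt_path)  # hypothetical loader
# Skipping reset_parameters() avoids paying for random initialization that loading
# pretrained weights would immediately discard.
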
def violates_moderation(text):
    """
    Check whether the text violates the OpenAI moderation API.
    """
    url = "https://api.openai.com/v1/moderations"
    headers = {"Content-Type": "application/json",
               "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
    text = text.replace("\n", "")
    try:
        # Let requests serialize the payload; a hand-built JSON string would break
        # on quotes or backslashes in `text`.
        ret = requests.post(url, headers=headers, json={"input": text}, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except (requests.exceptions.RequestException, KeyError):
        flagged = False

    return flagged

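# Note (illustrative): this helper expects OPENAI_API_KEY in the environment and fails
# open, i.e. any network error or unexpected response shape yields flagged=False. A
# typical call site might look like:
#   if args.moderate and violates_moderation(user_text):
#       return moderation_msg
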
def pretty_print_semaphore(semaphore):
    if semaphore is None:
        return "None"
    return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"