# (removed: web-page scrape residue — page chrome, file size, line-number gutter)
import base64
import mimetypes
import os
import time

import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.antdx as antdx
import modelscope_studio.components.base as ms
import modelscope_studio.components.pro as pro
from modelscope_studio.components.pro.chatbot import (ChatbotBotConfig,
                                                      ChatbotPromptsConfig,
                                                      ChatbotWelcomeConfig)
from modelscope_studio.components.pro.multimodal_input import \
    MultimodalInputUploadConfig
from openai import OpenAI
# Module-level OpenAI-compatible client pointed at the ModelScope inference
# gateway.  Requires the MODELSCOPE_API_KEY environment variable to be set;
# if it is missing, api_key is None and requests will fail at call time.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1/',
    api_key=os.getenv("MODELSCOPE_API_KEY"),  # ModelScope Token
)
# Vision-language model served by the endpoint above; also shown in the UI
# header and welcome title.
model = "Qwen/Qwen2.5-VL-72B-Instruct"
def prompt_select(input_value, e: gr.EventData):
    """Copy the clicked welcome-prompt description into the text input.

    Triggered by ``chatbot.welcome_prompt_select``; the selected prompt's
    description becomes the input box text.
    """
    description = e._data["payload"][0]["value"]["description"]
    input_value["text"] = description
    return gr.update(value=input_value)
def clear():
    """Reset the chatbot, wiping the conversation history."""
    return gr.update(value=None)
def cancel(chatbot_value):
    """Stop the in-flight generation and unlock the UI.

    Marks the last (streaming) assistant message as done with a "paused"
    footer, then re-enables the input and the clear button.
    """
    # Guard: if generation was cancelled before any message was appended,
    # chatbot_value may be empty — the original code raised IndexError here.
    if chatbot_value:
        last_message = chatbot_value[-1]
        last_message["loading"] = False
        last_message["status"] = "done"
        last_message["footer"] = "Chat completion paused"
    return gr.update(value=chatbot_value), gr.update(loading=False), gr.update(
        disabled=False)
def retry(chatbot_value, e: gr.EventData):
    """Regenerate the assistant reply the user asked to retry.

    Truncates the history just before the retried message, locks the UI,
    then re-runs ``submit`` without new user input.
    """
    index = e._data["payload"][0]["index"]
    # Drop the retried assistant message (and anything after it).
    chatbot_value = chatbot_value[:index]
    yield gr.update(loading=True), gr.update(value=chatbot_value), gr.update(
        disabled=True)
    # Delegate the streamed updates directly instead of a manual relay loop.
    yield from submit(None, chatbot_value)
def image_to_base64(image_path):
    """Encode a local image file as a base64 ``data:`` URI.

    The MIME type is guessed from the file extension (the upload widget
    accepts ``image/*``, so files are not necessarily JPEG).  Unknown or
    non-image extensions fall back to ``image/jpeg``, which was the
    previous hard-coded value.
    """
    mime_type, _ = mimetypes.guess_type(image_path)
    if mime_type is None or not mime_type.startswith("image/"):
        mime_type = "image/jpeg"
    with open(image_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
    return f"data:{mime_type};base64,{encoded_string}"
def format_history(history):
    """Convert the chatbot widget history into OpenAI chat messages.

    User turns become multimodal content (base64 image parts for files that
    exist on disk, followed by one text part); assistant turns are passed
    through as plain text.
    """
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for entry in history:
        if entry["role"] == "user":
            file_paths = entry["content"][0]["content"]
            text = entry["content"][1]["content"]
            parts = []
            for path in file_paths:
                # Skip files that no longer exist (e.g. cleaned temp uploads).
                if os.path.exists(path):
                    parts.append({
                        "type": "image_url",
                        "image_url": image_to_base64(path)
                    })
            parts.append({"type": "text", "text": text})
            messages.append({"role": "user", "content": parts})
        elif entry["role"] == "assistant":
            # ignore thought message
            messages.append({"role": "assistant", "content": entry["content"]})
    return messages
def submit(input_value, chatbot_value):
    """Append the user turn, stream the model reply, and yield UI updates.

    ``input_value`` is None when re-running an existing history (retry);
    otherwise its files/text are appended as a new user message.  Yields
    dicts keyed by the module-level ``input``, ``clear_btn`` and ``chatbot``
    components.  On failure the placeholder message is replaced with an
    error notice and the exception is re-raised.
    """
    if input_value is not None:
        chatbot_value.append({
            "role": "user",
            "content": [{
                "type": "file",
                "content": list(input_value["files"]),
            }, {
                "type": "text",
                "content": input_value["text"],
            }],
        })
    history_messages = format_history(chatbot_value)
    # Placeholder assistant message that the stream fills in below.
    chatbot_value.append({
        "role": "assistant",
        "content": "",
        "loading": True,
        "status": "pending"
    })
    # Lock the input and the clear button while streaming.
    yield {
        input: gr.update(value=None, loading=True),
        clear_btn: gr.update(disabled=True),
        chatbot: gr.update(value=chatbot_value)
    }
    try:
        response = client.chat.completions.create(model=model,
                                                  messages=history_messages,
                                                  stream=True)
        start_time = time.time()
        for chunk in response:
            # delta.content is None on role/finish chunks; the original
            # `+=` raised TypeError on those.  Also guard empty choices.
            if chunk.choices and chunk.choices[0].delta.content:
                chatbot_value[-1]["content"] += chunk.choices[0].delta.content
            chatbot_value[-1]["loading"] = False
            yield {chatbot: gr.update(value=chatbot_value)}
        # Footer shows elapsed generation time, e.g. "3.14s".
        chatbot_value[-1]["footer"] = "{:.2f}".format(time.time() -
                                                      start_time) + 's'
        chatbot_value[-1]["status"] = "done"
        yield {
            clear_btn: gr.update(disabled=False),
            input: gr.update(loading=False),
            chatbot: gr.update(value=chatbot_value),
        }
    except Exception as e:
        chatbot_value[-1]["loading"] = False
        chatbot_value[-1]["status"] = "done"
        chatbot_value[-1]["content"] = "Failed to respond, please try again."
        yield {
            clear_btn: gr.update(disabled=False),
            input: gr.update(loading=False),
            chatbot: gr.update(value=chatbot_value),
        }
        # Re-raise so Gradio surfaces the error after the UI is unlocked.
        raise e
# --- UI layout and event wiring -------------------------------------------
# NOTE(review): `input` shadows the builtin, but renaming it would touch the
# submit() update-dict keys; left as-is.
with gr.Blocks() as demo, ms.Application(), antdx.XProvider(), ms.AutoLoading(
):
    with antd.Flex(vertical=True, gap="middle", elem_style=dict(height=800)):
        chatbot = pro.Chatbot(
            # for flex=1 to fill the remaining space
            height=0,
            elem_style=dict(flex=1),
            welcome_config=ChatbotWelcomeConfig(
                variant="borderless",
                icon=
                "https://assets.alicdn.com/g/qwenweb/qwen-webui-fe/0.0.44/static/favicon.png",
                title=f"Hello, I'm {model}",
                description="You can upload images and text to get started.",
                prompts=ChatbotPromptsConfig(
                    title="How can I help you today?",
                    styles={
                        "list": {
                            "width": '100%',
                        },
                        "item": {
                            "flex": 1,
                        },
                    },
                    items=[{
                        # NOTE(review): "π" looks like a mis-encoded emoji
                        # (likely a copy/paste artifact) — confirm the
                        # intended glyph against the upstream demo.
                        "label": "π Make a plan",
                        "children": [{
                            "description":
                            "Help me with a plan to start a business"
                        }, {
                            "description":
                            "Help me with a plan to achieve my goals"
                        }, {
                            "description":
                            "Help me with a plan for a successful interview"
                        }]
                    }, {
                        # Fix: this label was split across two source lines
                        # without triple quotes (unterminated string literal,
                        # a SyntaxError); rejoined on one line.
                        "label": "π Help me write",
                        "children": [{
                            "description":
                            "Help me write a story with a twist ending"
                        }, {
                            "description":
                            "Help me write a blog post on mental health"
                        }, {
                            "description":
                            "Help me write a letter to my future self"
                        }]
                    }])),
            user_config=dict(
                avatar="https://api.dicebear.com/7.x/miniavs/svg?seed=3"),
            bot_config=ChatbotBotConfig(
                header=model,
                actions=["copy", "retry"],
                avatar=
                "https://assets.alicdn.com/g/qwenweb/qwen-webui-fe/0.0.44/static/favicon.png"
            ),
        )
        with pro.MultimodalInput(upload_config=MultimodalInputUploadConfig(
                max_count=4, multiple=True, accept="image/*")) as input:
            with ms.Slot("prefix"):
                with antd.Button(value=None, color="default",
                                 variant="text") as clear_btn:
                    with ms.Slot("icon"):
                        antd.Icon("ClearOutlined")

    # Event wiring: clear resets history; submit streams a reply; cancel
    # aborts the in-flight submit; retry regenerates a past reply; selecting
    # a welcome prompt pre-fills the input.
    clear_btn.click(fn=clear, outputs=[chatbot])
    submit_event = input.submit(fn=submit,
                                inputs=[input, chatbot],
                                outputs=[input, chatbot, clear_btn])
    input.cancel(fn=cancel,
                 inputs=[chatbot],
                 outputs=[chatbot, input, clear_btn],
                 cancels=[submit_event],
                 queue=False)
    chatbot.retry(fn=retry,
                  inputs=[chatbot],
                  outputs=[input, chatbot, clear_btn])
    chatbot.welcome_prompt_select(fn=prompt_select,
                                  inputs=[input],
                                  outputs=[input])

if __name__ == "__main__":
    # queue() enables streaming generators and event cancellation.
    demo.queue().launch()
# (removed: trailing scrape artifact)