import os

import gradio as gr
import modelscope_studio.components.base as ms
import modelscope_studio.components.legacy as mgr
from modelscope_studio.components.legacy.Chatbot.llm_thinking_presets import \
    qwen


def resolve_assets(relative_path):
    # Resolve a path relative to the demo's shared resources directory.
    return os.path.join(os.path.dirname(__file__), "../../resources",
                        relative_path)


# One pre-filled bot message showing the <accordion> tag and the Qwen thinking
# preset. The "screen.jpeg" asset name used below is a placeholder assumption;
# any image under ../../resources can be substituted.
conversation = [
    [
        None, {
            "text": f"""
Use accordion tag:

<accordion title="Using tool">

```json
{{"text": "glorious weather", "resolution": "1024*1024"}}
```

</accordion>

Qwen preset:

Action: image_gen
Action Input: {{"text": "glorious weather", "resolution": "1024*1024"}}
Observation: <result>![IMAGEGEN]({resolve_assets("screen.jpeg")})</result> Based on your description "glorious weather", I generated a picture.
Action: 「An arbitrary text representation that will be displayed as the name of the thought chain call」
Action Input: 「Any json or md content will be displayed in the drop-down box of the calling process」
Observation: <result>「Any md content will be displayed in the drop-down box when the call is completed」</result>
""",
"flushing": False
}
],
]
with gr.Blocks() as demo, ms.Application():
mgr.Chatbot(
value=conversation,
llm_thinking_presets=[qwen()],
height=600,
)
if __name__ == "__main__":
demo.queue().launch()