diff --git a/.gitattributes b/.gitattributes
index 7bc225d392b90484281c55c3e4f931eb5958e7be..dcee3a14925171a6399b9375eadead6a52f07689 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -31,4 +31,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..591a4d09126ca56317dff63a8958de0a03d103a5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,60 @@
+.eggs/
+dist/
+*.pyc
+__pycache__/
+*.py[cod]
+*$py.class
+__tmp/*
+*.pyi
+templates
+
+# common
+
+!.*ignore
+!.*rc
+!.gitattributes
+!.aoneci.yml
+!.editorconfig
+
+# Logs
+logs
+*.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+
+# Dependency directories
+bower_components
+node_modules/
+jspm_packages/
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+lib
+dist
+
+# TypeScript v1 declaration files
+typings/
+
+# Output of 'npm pack'
+*.tgz
+
+# xconsole
+src/.xconsole
+build
+.faas_debug_tmp
+.yarn
+.yalc
+yalc.lock
+.eslintcache
+.stylelintcache
+.DS_Store
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..02d9fbef0166f776e36177b00c1e0a9e0dd526ff
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,15 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY --link --chown=1000 . .
+
+RUN mkdir -p /tmp/cache/
+RUN chmod a+rwx -R /tmp/cache/
+ENV TRANSFORMERS_CACHE=/tmp/cache/
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+ENV PYTHONUNBUFFERED=1 GRADIO_ALLOW_FLAGGING=never GRADIO_NUM_PORTS=1 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 SYSTEM=spaces
+
+CMD ["python", "app.py"]
diff --git a/README-zh_CN.md b/README-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a09c8a466384f1ceb2308cd25cf2585e184c6d7
--- /dev/null
+++ b/README-zh_CN.md
@@ -0,0 +1,26 @@
+
Modelscope Studio
+
+
+
+
+ ✖️
+
+
+
+
+
+Github | 🤖 Modelscope Studio | 🤗 HuggingFace Space
+
+`modelscope_studio` 是一套基于 gradio 4.x 的扩展组件库,致力于服务于 ModelScope 创空间中对于 gradio 应用的各类扩展需求,目前主要聚焦在对话场景增强、多模态场景以及一些其他垂直场景支持。
+
+## Install
+
+```sh
+pip install modelscope_studio
+```
+
+## Components
+
+- Chatbot
+- MultimodalInput
+- Markdown
diff --git a/README.md b/README.md
index d22ace8b314538afa7c1c581d866911955580e50..03856e75f1b95f44b4360427277c205ee973f5b9 100644
--- a/README.md
+++ b/README.md
@@ -1,25 +1,42 @@
---
-# 详细文档见https://modelscope.cn/docs/%E5%88%9B%E7%A9%BA%E9%97%B4%E5%8D%A1%E7%89%87
-domain: #领域:cv/nlp/audio/multi-modal/AutoML
-# - cv
-tags: #自定义标签
--
-datasets: #关联数据集
- evaluation:
- #- iic/ICDAR13_HCTR_Dataset
- test:
- #- iic/MTWI
- train:
- #- iic/SIBR
-models: #关联模型
-#- iic/ofa_ocr-recognition_general_base_zh
-
-## 启动文件(若SDK为Gradio/Streamlit,默认为app.py, 若为Static HTML, 默认为index.html)
-# deployspec:
-# entry_file: app.py
-license: Apache License 2.0
+tags:
+ - gradio-custom-component
+ - Chatbot
+  - MultimodalInput
+ - Markdown
+ - gradio-template-Chatbot
+ - gradio-template-Markdown
+title: modelscope-studio
+colorFrom: blue
+colorTo: gray
+sdk: docker
+pinned: false
+license: apache-2.0
---
-#### Clone with HTTP
-```bash
- git clone https://www.modelscope.cn/studios/modelscope/modelscope_gradio_components.git
-```
\ No newline at end of file
+
+
Modelscope Studio
+
+
+
+
+ ✖️
+
+
+
+
+
+Github | 🤖 Modelscope Studio | 🤗 HuggingFace Space
+
+`modelscope_studio` is a set of extension component libraries based on gradio 4.x, dedicated to serving the various extension needs of gradio applications within the ModelScope Studio. It mainly focuses on enhancing conversational scenarios, supporting multimodal contexts, and providing assistance for various other specialized scenarios.
+
+## Install
+
+```sh
+pip install modelscope_studio
+```
+
+## Components
+
+- Chatbot
+- MultimodalInput
+- Markdown
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..41659af68b2e1bca6472fbc7d8b25c8da1033d52
--- /dev/null
+++ b/app.py
@@ -0,0 +1,22 @@
+import gradio as gr
+from components.Chatbot.app import docs as chatbot_docs
+from components.Docs import Docs
+from components.Markdown.app import docs as markdown_docs
+from components.MultimodalInput.app import docs as multimodel_input_docs
+
+readme_docs = Docs(__file__)
+
+docs = [
+ ["Quick Start", readme_docs],
+ ["Chatbot", chatbot_docs],
+ ["Markdown", markdown_docs],
+ ["MultimodalInput", multimodel_input_docs],
+]
+
+with gr.Blocks() as demo:
+ with gr.Tabs() as components_tabs:
+ for doc in docs:
+ with gr.TabItem(doc[0], id=doc[0]):
+ doc[1].render(components_tabs)
+
+demo.queue().launch()
diff --git a/components/Chatbot/README-zh_CN.md b/components/Chatbot/README-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..f71f6cd53689f9895bbbc7a6d67b73eeb09a16b2
--- /dev/null
+++ b/components/Chatbot/README-zh_CN.md
@@ -0,0 +1,137 @@
+# Chatbot
+
+升级版的 gradio Chatbot。
+
+- 支持前端匀速流式输出 message
+- 支持输出多模态内容(音频、视频、语音、文件、文本)
+- 支持多 agent 场景
+- 支持自定义渲染组件,并与 Python 侧事件交互
+
+## 如何使用
+
+### 基本使用
+
+
+
+### 多模态 & 支持本地文件的展示
+
+
+
+### 控制打字机单句 message 开关
+
+
+
+### 支持手风琴内容展示
+
+在返回的内容中加入 `accordion` 标签,可以在内容中加入手风琴,更多用法详见 Markdown 内置自定义标签
+
+同时为了适配大模型的工具调用链路,额外对某些大模型的格式做了预设配置,支持下述格式的预设处理(会将下面的格式转换成上方`accordion`标签包裹形式)
+
+```python
+import modelscope_studio as mgr
+from modelscope_studio.components.Chatbot.llm_thinking_presets import qwen
+
+# 添加 qwen 解析预设
+mgr.Chatbot(llm_thinking_presets=[qwen()])
+```
+
+```text
+Action: image_gen
+Action Input: {"text": "glorious weather", "resolution": "1024*1024"}
+Observation: ![IMAGEGEN](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/a2/20231213/723609ee/1926736d-7c6e-4d2f-b438-b7746b3d89f5-1.png?Expires=1702537773&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=H%2B0rIn6BMfE%2BOr1uPb7%2Br9G3%2B5w%3D) 根据您的描述"glorious weather",我生成了一张图片。![](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/a2/20231213/723609ee/1926736d-7c6e-4d2f-b438-b7746b3d89f5-1.png?Expires=1702537773&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=H%2B0rIn6BMfE%2BOr1uPb7%2Br9G3%2B5w%3D)
+
+Action: 「任意文本表示,将展示为思考链调用的名称」
+Action Input: 「任意json or md 内容,将展示到调用过程的下拉框」
+Observation: 「任意 md 内容,将作为完成调用的展示的下拉框内」
+```
+
+
+
+### 支持用户选择交互
+
+在返回的内容中加入 `select-box` 标签,更多用法详见 Markdown 内置自定义标签
+
+
+
+### 多 bot 场景
+
+
+
+### 自定义标签(高阶用法,需要了解前端知识)
+
+详见 Markdown 组件
+
+## API 及参数列表
+
+以下 API 均为在原有 gradio Chatbot 外的额外拓展参数。
+
+### value
+
+接口定义:
+
+```python
+
+class FileMessage(GradioModel):
+ file: FileData
+ alt_text: Optional[str] = None
+
+
+class MultimodalMessage(GradioModel):
+ # 默认以 index 为作为 id,id 改变会导致 message 重新渲染
+ id: Optional[str] = None
+ # message 容器的 elem id
+ elem_id: Optional[str] = None
+ # message 容器的 elem classes
+ elem_classes: Optional[list[str] | str] = None
+ name: Optional[str] = None
+ text: Optional[str] = None
+ flushing: Optional[bool] = None
+ avatar: Optional[Union[str, FileData]] = ''
+ files: Optional[List[Union[FileMessage, dict, FileData, str]]] = None
+
+# 支持多 bot 场景
+MultimodalMessageItem = Optional[Union[MultimodalMessage, MultimodalInputData,
+ dict, str]]
+
+
+class ChatbotData(GradioRootModel):
+ root: List[Tuple[Union[MultimodalMessageItem, List[MultimodalMessageItem]],
+ Union[MultimodalMessageItem,
+ List[MultimodalMessageItem]]]]
+```
+
+### props
+
+| 属性 | 类型 | 默认值 | 描述 |
+| ----------------------------- | -------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| flushing | bool | True | 是否开启打字机效果。默认只有 bot 的 message 会开启,可以通过单独修改 message 的 flushing 属性精确控制每一条 message 的显示效果 |
+| enable_base64 | bool | False | 是否支持渲染的内容为 base64,因为直接渲染 base64 会带来安全问题,默认为 False。 |
+| enable_latex | bool | True | 是否支持 Latex 公式渲染 |
+| latex_single_dollar_delimiter | bool | True | 是否支持单`$`符号在 Latex 公式中渲染 |
+| preview | bool | True | 是否开启图片预览功能 |
+| avatar_images | tuple\[str \| Path \| None \| dict \| list, str \| Path \| None \| dict\| list\] | None | 拓展gr.Chatbot的参数值,除了接收 url 外还可以接收 dict 和 list,dict 可以传入avatar和name字段,name字段在渲染时会显示在头像下方。
- 当传入 dict 时,必须包含有avatar字段。
- 当传入 list 时,一般对应多 bot 模式,每一项可以接收前面所有的值,每个 bot 的头像与 message 中 bot 的位置一一对应 |
+| avatar_image_align | Literal['top', 'middle', 'bottom'] | 'bottom' | 控制头像与 message 的对齐方式,默认为下对齐 |
+| avatar_image_width | int | 45 | 头像与名称的宽度 |
+| flushing_speed | int | 3 | 打字机速度,值为 1 - 10,值越大速度越快 |
+| llm_thinking_presets | list\[dict\] | \[\] | llm 思考链路解析预设,可以将 llm 调用工具的输出格式转为固定的前端展示格式,需要从modelscope_studio.Chatbot.llm_thinking_presets引入,目前支持:qwen |
+| custom_components | dict\[str, CustomComponentDict\] CustomComponentDict 定义见下方 | None | 支持用户定义自定义标签,并通过 js 控制标签渲染样式与触发 python 事件。 |
+
+**CustomComponent 定义如下**
+
+```python
+class CustomComponentDict(TypedDict):
+ props: Optional[List[str]]
+ template: Optional[str]
+ js: Optional[str]
+```
+
+### 内置的自定义标签
+
+见 Markdown 内置自定义标签
+
+### event listeners
+
+| 事件 | 描述 |
+| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `mgr.Chatbot.flushed(fn, ···)` | 当打字机效果结束时触发。EventData 为:
- index:当前 message 的 index tuple。
- value:当前 message value。 |
+| `mgr.Chatbot.custom(fn, ···)` | 自定义标签触发事件时触发,EventData 为:
- index:前 message 的 index tuple。
- tag:当前触发的标签。
- tag_index:当前触发标签的 index,此 index 在 mesage 的 index tuple 基础上重新计算。
- value:自定义传入的值。 |
diff --git a/components/Chatbot/README.md b/components/Chatbot/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..79ccbf10ec38d672039cd4abcd69de88db1937f6
--- /dev/null
+++ b/components/Chatbot/README.md
@@ -0,0 +1,136 @@
+# Chatbot
+
+Upgraded gradio Chatbot.
+
+- Supports uniform frontend streaming output of messages
+- Supports output of multimodal content (audio, video, voice, files, text)
+- Supports multi-agent scenarios
+- Supports custom rendering components and interaction with events on the Python side
+
+## How to Use
+
+### Basic Usage
+
+
+
+### Multimodal & Support for Local File Display
+
+
+
+### Control for Typewriter Single Sentence Message
+
+
+
+### Support for Accordion Content Display
+
+Include the `accordion` tag in the returned content to add an accordion within the content. For more usage details, see Markdown Built-in Custom Tags.
+Additionally, to adapt to the tool-calling chain of large models, preset configurations for certain large models have been made. The following preset formats are supported (they will be converted into the form wrapped by the above `accordion` tag).
+
+```python
+import modelscope_studio as mgr
+from modelscope_studio.components.Chatbot.llm_thinking_presets import qwen
+
+# Add qwen preset
+mgr.Chatbot(llm_thinking_presets=[qwen()])
+```
+
+```text
+Action: image_gen
+Action Input: {"text": "glorious weather", "resolution": "1024*1024"}
+Observation: ![IMAGEGEN](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/a2/20231213/723609ee/1926736d-7c6e-4d2f-b438-b7746b3d89f5-1.png?Expires=1702537773&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=H%2B0rIn6BMfE%2BOr1uPb7%2Br9G3%2B5w%3D) Based on your description: glorious weather, I generated a picture. ![](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/a2/20231213/723609ee/1926736d-7c6e-4d2f-b438-b7746b3d89f5-1.png?Expires=1702537773&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=H%2B0rIn6BMfE%2BOr1uPb7%2Br9G3%2B5w%3D)
+
+Action: 「An arbitrary text representation that will be displayed as the name of the thought chain call」
+Action Input: 「Any json or md content will be displayed in the drop-down box of the calling process」
+Observation: 「Any md content will be displayed in the drop-down box when the call is completed」
+```
+
+
+
+### Support for User Selection Interaction
+
+Include the `select-box` tag in the returned content. For more usage details, see Markdown Built-in Custom Tags.
+
+
+
+### Multi-bot Scenarios
+
+
+
+### Custom Tags (Advanced Usage, Requires Frontend Knowledge)
+
+See the Markdown component for details.
+
+## API and Parameter List
+
+The following APIs are additional extended parameters beyond the original gradio Chatbot.
+
+### value
+
+Interface definition:
+
+```python
+
+class FileMessage(GradioModel):
+ file: FileData
+ alt_text: Optional[str] = None
+
+
+class MultimodalMessage(GradioModel):
+    # By default, the message index is used as the id. Changing the id will cause the message to be re-rendered.
+ id: Optional[str] = None
+ # elem id of message container
+ elem_id: Optional[str] = None
+ # elem classes of message container
+ elem_classes: Optional[list[str] | str] = None
+ name: Optional[str] = None
+ text: Optional[str] = None
+ flushing: Optional[bool] = None
+ avatar: Optional[Union[str, FileData]] = ''
+ files: Optional[List[Union[FileMessage, dict, FileData, str]]] = None
+
+# Support multi-bot scenarios
+MultimodalMessageItem = Optional[Union[MultimodalMessage, MultimodalInputData,
+ dict, str]]
+
+
+class ChatbotData(GradioRootModel):
+ root: List[Tuple[Union[MultimodalMessageItem, List[MultimodalMessageItem]],
+ Union[MultimodalMessageItem,
+ List[MultimodalMessageItem]]]]
+```
+
+### props
+
+| Attribute | Type | Default Value | Description |
+| ----------------------------- | -------------------------------------------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| flushing | bool | True | Whether to enable the typewriter effect. By default, only the bot's messages will have this effect, but you can control the display effect of each message precisely by modifying the flushing attribute of a message individually. |
+| enable_base64 | bool | False | Whether to support rendering content as base64, since rendering base64 is unsafe, the default is False. |
+| enable_latex | bool | True | Whether to enable LaTeX rendering. |
+| latex_single_dollar_delimiter | bool | True | Whether to enable single dollar delimiter `$` for LaTeX rendering. |
+| preview | bool | True | Whether to enable image preview functionality. |
+| avatar_images | tuple\[str \| Path \| None \| dict \| list, str \| Path \| None \| dict\| list\] | None | An extended parameter value for gr.Chatbot, in addition to accepting a URL, it can also accept a dict and list. The dict can include the fields avatar and name, where the name field will be displayed under the avatar when rendered.
- When passing a dict, it must include an avatar field.
- When passing a list, it generally corresponds to the multi-bot mode, where each item can receive all the aforementioned values, and each bot’s avatar matches with the position of the bot in the messages. |
+| avatar_image_align | Literal['top', 'middle', 'bottom'] | 'bottom' | Controls the alignment of the avatar with the messages, default is bottom-aligned. |
+| avatar_image_width | int | 45 | The width of the avatar and name. |
+| flushing_speed | int | 3 | Typewriter speed, values range from 1 - 10, with larger values indicating faster speeds. |
+| llm_thinking_presets | list\[dict\] | \[\] | llm thinking link presets, which can convert the output format of llm calling tools into a fixed front-end display format. It needs to be imported from modelscope_studio.Chatbot.llm_thinking_presets, and currently supports: qwen. |
+| custom_components | dict\[str, CustomComponentDict\] CustomComponentDict is defined below | None | Allows users to define custom tags and control tag rendering styles and trigger Python events through JS. |
+
+**Definition of CustomComponent is as follows:**
+
+```python
+class CustomComponentDict(TypedDict):
+ props: Optional[List[str]]
+ template: Optional[str]
+ js: Optional[str]
+```
+
+### Built-in Custom Tags
+
+See Markdown Built-in Custom Tags
+
+### event listeners
+
+| Event | Description |
+| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `mgr.Chatbot.flushed(fn, ···)` | Triggered when the typewriter effect ends. EventData is:
- index: The index tuple of the current message.
- value: The current message value. |
+| `mgr.Chatbot.custom(fn, ···)` | Triggered when a custom tag event occurs. EventData is:
- index: The index tuple of the previous message.
- tag: The current tag that triggered the event.
- tag_index: The index of the current triggered tag, re-calculated based on the index tuple of the message.
- value: The custom value passed in. |
diff --git a/components/Chatbot/app.py b/components/Chatbot/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..77372ef291165f2cf77a8944f3f8b18773972190
--- /dev/null
+++ b/components/Chatbot/app.py
@@ -0,0 +1,6 @@
+from components.Docs import Docs
+
+docs = Docs(__file__)
+
+if __name__ == "__main__":
+ docs.render().queue().launch()
diff --git a/components/Chatbot/demos/accordion.py b/components/Chatbot/demos/accordion.py
new file mode 100644
index 0000000000000000000000000000000000000000..062cf158165ab632e32ebb061acf70504dd82b5c
--- /dev/null
+++ b/components/Chatbot/demos/accordion.py
@@ -0,0 +1,49 @@
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+from modelscope_studio.components.Chatbot.llm_thinking_presets import qwen
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+conversation = [
+ [
+ None, {
+ "text": f"""
+Use accordion tag:
+
+
+```json
+{{"text": "glorious weather", "resolution": "1024*1024"}}
+```
+
+
+
+Qwen preset:
+Action: image_gen
+Action Input: {{"text": "glorious weather", "resolution": "1024*1024"}}
+Observation: ![IMAGEGEN]({resolve_assets("screen.jpeg")}) Based on your description"glorious weather",I generated a picture.![]({resolve_assets("screen.jpeg")})
+
+Action: 「An arbitrary text representation that will be displayed as the name of the thought chain call」
+Action Input: 「Any json or md content will be displayed in the drop-down box of the calling process」
+Observation: 「Any md content will be displayed in the drop-down box when the call is completed」
+""",
+ "flushing": False
+ }
+ ],
+]
+
+with gr.Blocks() as demo:
+ mgr.Chatbot(
+ value=conversation,
+ llm_thinking_presets=[qwen()],
+ height=600,
+ )
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/demos/basic.py b/components/Chatbot/demos/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ad16955faa1b4a91705ce40258bfc83734ec46b
--- /dev/null
+++ b/components/Chatbot/demos/basic.py
@@ -0,0 +1,55 @@
+import os
+import time
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+conversation = [
+ [
+ None,
+ {
+ # The first message of bot closes the typewriter.
+ "text": "Hello I'm a chatbot",
+ "flushing": False
+ }
+ ],
+]
+
+
+def submit(_input, _chatbot):
+ _chatbot.append([_input, None])
+ yield gr.update(interactive=False, value=None), _chatbot
+ time.sleep(2)
+ _chatbot[-1][1] = {"text": _input.text + '!'}
+ yield {
+ chatbot: _chatbot,
+ }
+
+
+def flushed():
+ return gr.update(interactive=True)
+
+
+with gr.Blocks() as demo:
+ chatbot = mgr.Chatbot(
+ value=conversation,
+ avatar_images=[
+ os.path.join(os.path.dirname(__file__), "../resources/user.jpeg"),
+ {
+ "name":
+ "bot",
+ "avatar":
+ os.path.join(os.path.dirname(__file__),
+ "../resources/bot.jpeg")
+ }
+ ],
+ height=600,
+ )
+
+ input = mgr.MultimodalInput()
+ input.submit(fn=submit, inputs=[input, chatbot], outputs=[input, chatbot])
+ chatbot.flushed(fn=flushed, outputs=[input])
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/demos/message_config.py b/components/Chatbot/demos/message_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2fe3e1bb71a97e0ca18ef343ab05ca2ef492d96
--- /dev/null
+++ b/components/Chatbot/demos/message_config.py
@@ -0,0 +1,35 @@
+import time
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
+def submit(_chatbot):
+ _chatbot.append(["test user",
+ "test bot"]) # bot starts the typewriter by default
+ yield _chatbot
+ time.sleep(2)
+ _chatbot.append(["test user", {
+ "text": "test bot",
+ "flushing": False
+ }]) # both start the typewriter
+ yield _chatbot
+ time.sleep(2)
+ _chatbot.append([{
+ "text": "test user",
+ "flushing": True
+ }, {
+ "text": "test bot",
+ "flushing": False
+ }]) # user starts the typewriter
+ yield _chatbot
+
+
+with gr.Blocks() as demo:
+ chatbot = mgr.Chatbot(height=600, )
+ button = gr.Button("Submit")
+ button.click(fn=submit, inputs=[chatbot], outputs=[chatbot])
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/demos/multi_bots.py b/components/Chatbot/demos/multi_bots.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb961e7dc1fe7f20bb4eca1b2e4f2fc8a00899aa
--- /dev/null
+++ b/components/Chatbot/demos/multi_bots.py
@@ -0,0 +1,87 @@
+import os
+import time
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+conversation = [
+ [None, {
+ "text": "Hello I'm a chatbot",
+ "flushing": False
+ }],
+]
+
+
+def get_last_bot_message(chatbot):
+ return chatbot[-1][1]
+
+
+def create_music_bot_message(text: str):
+ return {
+ "text": text,
+ }
+
+
+def create_image_bot_message(text: str):
+ return {
+ "text": text,
+ }
+
+
+def submit(_input, _chatbot):
+ _chatbot.append([_input, None])
+ yield gr.update(interactive=False, value=None), _chatbot
+ _chatbot[-1][1] = [
+ "Hello",
+ create_image_bot_message("Hello"),
+ create_music_bot_message("Hello")
+ ]
+
+ time.sleep(2)
+ get_last_bot_message(_chatbot)[1][
+ "text"] = f"""Hello, I\'m a image bot\n![image]({resolve_assets("user.jpeg")})"""
+ get_last_bot_message(_chatbot)[2][
+ "text"] = f"""Hello, I\'m a music bot """
+ yield {
+ chatbot: _chatbot,
+ }
+
+
+def flushed():
+ return gr.update(interactive=True)
+
+
+with gr.Blocks() as demo:
+ chatbot = mgr.Chatbot(
+ value=conversation,
+ avatar_image_width=40,
+ avatar_images=[
+ resolve_assets('user.jpeg'),
+ # default bot avatar and name
+ [{
+ "name": "bot",
+ "avatar": resolve_assets('bot.jpeg')
+ }, {
+ "name": "image bot",
+ "avatar": resolve_assets('image-bot.jpeg')
+ }, {
+ "name": "music bot",
+ "avatar": resolve_assets('music-bot.jpeg')
+ }]
+ ],
+ height=600,
+ )
+
+ input = mgr.MultimodalInput()
+ input.submit(fn=submit, inputs=[input, chatbot], outputs=[input, chatbot])
+ chatbot.flushed(fn=flushed, outputs=[input])
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/demos/multimodal.py b/components/Chatbot/demos/multimodal.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c21f44e8ab1ede49423c8997feba2f0c097759a
--- /dev/null
+++ b/components/Chatbot/demos/multimodal.py
@@ -0,0 +1,43 @@
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+conversation = [
+ [
+ None, {
+ "text": f"""
+Image
+
+![image]({resolve_assets("bot.jpeg")})
+
+
+
+Video
+
+
+
+Audio
+
+
+""",
+ "flushing": False
+ }
+ ],
+]
+
+with gr.Blocks() as demo:
+ mgr.Chatbot(
+ value=conversation,
+ height=600,
+ )
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/demos/select-box.py b/components/Chatbot/demos/select-box.py
new file mode 100644
index 0000000000000000000000000000000000000000..6035ba30cf5f7a956396693b600560e34c49073a
--- /dev/null
+++ b/components/Chatbot/demos/select-box.py
@@ -0,0 +1,48 @@
+import json
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+# `label` will display on the page, and `value` is the actual selected value.
+options = [{"label": "A", "value": "a"}, "b", "c"]
+
+conversation = [[
+ None, f"""
+Single Select:
+
+Multiple Select:
+
+Vertical Direction:
+
+
+
+Card Shape:
+
+
+
+
+
+
+
+
+"""
+]]
+
+
+# The custom data must be marked by `gr.EventData`
+def fn(data: gr.EventData):
+ print(data._data)
+
+
+with gr.Blocks() as demo:
+ chatbot = mgr.Chatbot(
+ value=conversation,
+ flushing=False,
+ height=600,
+ )
+ # All custom tags will trigger the custom event
+ chatbot.custom(fn=fn)
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Chatbot/resources/audio.wav b/components/Chatbot/resources/audio.wav
new file mode 100644
index 0000000000000000000000000000000000000000..105190ad88e2e177540361de340e54feb1587f3c
Binary files /dev/null and b/components/Chatbot/resources/audio.wav differ
diff --git a/components/Chatbot/resources/bot.jpeg b/components/Chatbot/resources/bot.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5fde8cc45f61b677c0581e6889b11e269c35be08
Binary files /dev/null and b/components/Chatbot/resources/bot.jpeg differ
diff --git a/components/Chatbot/resources/custom_components/custom_select.js b/components/Chatbot/resources/custom_components/custom_select.js
new file mode 100644
index 0000000000000000000000000000000000000000..ca7e2ae712ca53eff775d1f5b22d9c004720db0b
--- /dev/null
+++ b/components/Chatbot/resources/custom_components/custom_select.js
@@ -0,0 +1,26 @@
+(props, cc, { el, onMount }) => {
+ const options = JSON.parse(props.options);
+ el.innerHTML = `
+ ${options
+ .map((option) => {
+ return `
+
+
`;
+ })
+ .join('')}
+ `;
+ onMount(() => {
+ const inputs = Array.from(el.getElementsByTagName('input'));
+ Array.from(el.getElementsByTagName('label')).forEach((label, i) => {
+ label.addEventListener('click', () => {
+ inputs.forEach((input) => {
+ input.checked = false;
+ });
+ const input = label.getElementsByTagName('input')[0];
+ input.checked = true;
+ // Use cc.dispatch to trigger events.
+ cc.dispatch(options[i]);
+ });
+ });
+ });
+};
diff --git a/components/Chatbot/resources/dog.mp4 b/components/Chatbot/resources/dog.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..062b9c81317de43f392c56e9e03444bf8cc31d51
--- /dev/null
+++ b/components/Chatbot/resources/dog.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39d086ce29e48cf76e5042d2f3f0611ee46575f70fa3dc0c40dd4cfffde3d933
+size 8626383
diff --git a/components/Chatbot/resources/image-bot.jpeg b/components/Chatbot/resources/image-bot.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5df101fedca7e0b76eaa1119506ca4c1941c9fc7
Binary files /dev/null and b/components/Chatbot/resources/image-bot.jpeg differ
diff --git a/components/Chatbot/resources/music-bot.jpeg b/components/Chatbot/resources/music-bot.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..00288fb576845921bbc3247ca8da3fbbdea12800
Binary files /dev/null and b/components/Chatbot/resources/music-bot.jpeg differ
diff --git a/components/Chatbot/resources/screen.jpeg b/components/Chatbot/resources/screen.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..574735acb117e86c5c0850e2b5489b8f8efa20cc
Binary files /dev/null and b/components/Chatbot/resources/screen.jpeg differ
diff --git a/components/Chatbot/resources/user.jpeg b/components/Chatbot/resources/user.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..536948b6bd19cb0b49c44b74e2790198301520e5
Binary files /dev/null and b/components/Chatbot/resources/user.jpeg differ
diff --git a/components/Docs.py b/components/Docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..24f577e05280442648010f474691861d4ddeba28
--- /dev/null
+++ b/components/Docs.py
@@ -0,0 +1,162 @@
+import os
+import re
+from typing import Callable
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+from .parse_markdown import parse_markdown
+
+with open(os.path.join(os.path.dirname(__file__), "tab-link.js")) as f:
+ tab_link_js = f.read()
+
+custom_components = {
+ "tab-link": {
+ "props": ["tab", "component-tab"],
+ "js": tab_link_js
+ }
+}
+
+
+def remove_formatter(markdown_text):
+ pattern = r"^ *---[\s\S]*?---"
+
+ replaced_text = re.sub(pattern, "", markdown_text)
+
+ return replaced_text
+
+
+def list_demos(dir_path: str, prefix=''):
+ result = []
+ if (not os.path.isdir(dir_path)):
+ return result
+ for name in os.listdir(dir_path):
+ path = os.path.join(dir_path, name)
+
+ if os.path.isfile(path):
+ result.append(prefix + name)
+ elif os.path.isdir(path):
+ sub_prefix = prefix + name + '/'
+ result.extend(list_demos(path, sub_prefix))
+
+ return result
+
+
+def get_demo_modules(file_path: str):
+ import importlib.util
+
+ demos = [
+ demo for demo in list_demos(
+ os.path.join(os.path.dirname(file_path), "demos"))
+ if demo.endswith(".py")
+ ]
+ demo_modules = {}
+ for demo in demos:
+ demo_name = demo.split(".")[0]
+ spec = importlib.util.spec_from_file_location(
+ "demo", os.path.join(os.path.dirname(file_path), "demos", demo))
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ demo_modules[demo_name] = module
+ return demo_modules
+
+
+is_modelscope_studio = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
+
+
+class Docs:
+
+ def __init__(self, file_path: str, markdown_files: list = None):
+ self.file_path = file_path
+ self.demo_modules = get_demo_modules(file_path)
+ # default current directory
+ self.markdown_files = markdown_files if markdown_files else [
+ filename for filename in os.listdir(os.path.dirname(file_path))
+ if filename.endswith(".md")
+ ]
+ if is_modelscope_studio:
+ self.markdown_files = list(
+ filter(
+ lambda x: x.endswith("-zh_CN.md") or
+ (f"{'.'.join(x.split('.')[:-1])}-zh_CN.md" not in self.
+ markdown_files), self.markdown_files))
+ else:
+ self.markdown_files = list(
+ filter(lambda x: not x.endswith("-zh_CN.md"),
+ self.markdown_files))
+
+ self.tabs = None
+
+ def read_file(self, relative_path: str):
+ with open(os.path.join(os.path.dirname(self.file_path), relative_path),
+ "r") as f:
+ return f.read()
+
+ def render_demo(self, demo_name, prefix='', suffix=''):
+ content = self.read_file(f"./demos/{demo_name}.py")
+ module = self.demo_modules[demo_name]
+ with gr.Accordion("Show Demo", open=False):
+ with gr.Row():
+ with gr.Column():
+ mgr.Markdown(f"""
+{prefix}
+````python
+{content}
+````
+{suffix}
+""",
+ header_links=True,
+ custom_components=custom_components)
+ with gr.Column():
+ module.demo.render()
+
+ def render_markdown(self,
+ markdown_file,
+ on_tab_link_click: Callable = None,
+ components_tabs=None):
+ items = parse_markdown(remove_formatter(self.read_file(markdown_file)),
+ read_file=self.read_file)
+ for item in items:
+ if item["type"] == "text":
+ md = mgr.Markdown(item["value"],
+ header_links=True,
+ custom_components=custom_components,
+ preview=False)
+ deps = [dep for dep in [components_tabs, self.tabs] if dep]
+ if len(deps) > 0:
+ md.custom(fn=on_tab_link_click, outputs=deps)
+ elif item["type"] == "demo":
+ self.render_demo(item["name"],
+ prefix=item["prefix"],
+ suffix=item["suffix"])
+
+ def render(self, components_tabs=None):
+
+ def tab_link_click(data: gr.EventData):
+ tab: str = data._data["value"].get("tab", '')
+ component_tab: str = data._data["value"].get("component_tab", '')
+ if tab and tabs:
+ return {tabs: gr.update(selected=tab)}
+ elif components_tabs and component_tab:
+ return {components_tabs: gr.update(selected=component_tab)}
+
+ with gr.Blocks() as demo:
+
+ if len(self.markdown_files) > 1:
+ with gr.Tabs() as tabs:
+ self.tabs = tabs
+
+ for markdown_file in self.markdown_files:
+ tab_name = ".".join(markdown_file.split(".")[:-1])
+ tab_name = tab_name.split("-zh_CN")[0]
+ with gr.TabItem(tab_name, id=tab_name):
+ self.render_markdown(
+ markdown_file,
+ on_tab_link_click=tab_link_click,
+ components_tabs=components_tabs)
+ elif (len(self.markdown_files) == 1):
+ self.render_markdown(self.markdown_files[0],
+ on_tab_link_click=tab_link_click,
+ components_tabs=components_tabs)
+ return demo
diff --git a/components/Markdown/README-zh_CN.md b/components/Markdown/README-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..c399f84423ef88f06eb63e55b5cd68e606227bef
--- /dev/null
+++ b/components/Markdown/README-zh_CN.md
@@ -0,0 +1,89 @@
+# Markdown
+
+升级版的 gradio Markdown。
+
+- 支持输出多模态内容(音频、视频、语音、文件、文本)
+- 支持自定义渲染组件,并与 Python 侧事件交互
+
+## 如何使用
+
+### 基本使用
+
+
+
+### 多模态 & 支持本地文件的展示
+
+
+
+### 支持手风琴内容展示
+
+在返回的内容中加入 `accordion` 标签,更多用法详见
accordion
+
+
+
+### 支持用户选择交互
+
+在返回的内容中加入 `select-box` 标签,更多用法详见
select-box
+
+
+
+### 自定义标签(高阶用法,需要了解前端知识)
+
+
+
+#### 引入 js
+
+
+
+template只能做简单的变量替换,如果想要引入更多自定义的行为,如条件判断、循环渲染等,请使用 js 控制 el 自行处理,下面是简单的示例:
+
+
+
+custom_select.js
+
+```js
+
+```
+
+
+
+
+#### 与 Python 侧交互
+
+在 js 中可以使用`cc.dispatch`触发 Python 侧监听的`custom`事件,以前面的custom_select.js为例,我们在前端调用了`cc.dispatch(options[i])`,则会向 Python 侧同时发送通知。
+
+
+
+## API 及参数列表
+
+以下 API 均为在原有 gradio Markdown 外的额外拓展参数。
+
+### props
+
+| 属性 | 类型 | 默认值 | 描述 |
+| ----------------------------- | --------------------------------------------------------------- | ------ | --------------------------------------------------------------------------- |
+| enable_base64 | bool | False | 是否支持渲染的内容为 base64,因为直接渲染 base64 有安全问题,默认为 False。 |
+| enable_latex | bool | True | 是否支持 Latex 公式渲染 |
+| latex_single_dollar_delimiter | bool | True | 是否支持单`$`符号在 Latex 公式中渲染 |
+| preview | bool | True | 是否开启图片预览功能 |
+| custom_components | dict\[str, CustomComponentDict\] CustomComponentDict 定义见下方 | None | 支持用户定义自定义标签,并通过 js 控制标签渲染样式与触发 python 事件。 |
+
+**CustomComponent 定义如下**
+
+```python
+class CustomComponentDict(TypedDict):
+ props: Optional[List[str]]
+ template: Optional[str]
+ js: Optional[str]
+```
+
+### 内置的自定义标签
+
+-
select-box
+-
accordion
+
+### event listeners
+
+| 事件 | 描述 |
+| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `mgr.Markdown.custom(fn, ···)` | 自定义标签触发事件时触发,EventData 为:
- index:当前 message 的 index tuple ([message index, user group(index 0) or bot group(index 1), user/bot group index])。
- tag:当前触发的标签。
- tag_index:当前触发标签的 index,此 index 在 message 的 index tuple 基础上重新计算。
- value:自定义传入的值。 |
diff --git a/components/Markdown/README.md b/components/Markdown/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..db32845077ceca2c4a19c9c70a82b05abcd85e2a
--- /dev/null
+++ b/components/Markdown/README.md
@@ -0,0 +1,85 @@
+# Markdown
+
+Upgraded gradio Markdown.
+
+- Supports output of multimodal content (audio, video, voice, files, text)
+- Supports custom rendering components and interaction with Python-side events
+
+## How to Use
+
+### Basic Usage
+
+
+
+### Multimodal & Support for Local File Display
+
+
+
+### Support for Accordion Content Display
+
+Include the `accordion` tag in the returned content for more usage details, see
accordion
+
+
+### Support for User Selection Interaction
+
+Include the `select-box` tag in the returned content for more usage details, see
select-box
+
+
+### Custom Tags (Advanced Usage, Requires Frontend Knowledge)
+
+
+
+#### Importing js
+
+
+The template can only perform simple variable replacements. If you want to introduce more custom behaviors, such as conditional judgments, loop rendering, etc., please use js to control the element for processing. Here is a simple example:
+
+
+custom_select.js
+
+```js
+
+```
+
+
+
+
+#### Interaction with Python Side
+
+In js, you can use `cc.dispatch` to trigger the `custom` event listened to on the Python side. Taking the previous custom_select.js as an example, when we call `cc.dispatch(options[i])` on the frontend, a notification will be sent to the Python side simultaneously.
+
+
+## API and Parameter List
+
+The following APIs are additional extended parameters beyond the original gradio Markdown.
+
+### props
+
+| Attribute | Type | Default Value | Description |
+| ----------------------------- | ------------------------------------------------------------------- | ------------- | ---------------------------------------------------------------------------------------------------------- |
+| enable_base64 | bool | False | Whether to support rendering content as base64, since rendering base64 is unsafe, the default is False. |
+| preview | bool | True | Whether to enable image preview functionality. |
+| enable_latex | bool | True | Whether to enable LaTeX rendering. |
+| latex_single_dollar_delimiter | bool | True | Whether to enable single dollar delimiter `$` for LaTeX rendering. |
+| custom_components | dict[str, CustomComponentDict] CustomComponentDict definition below | None | Supports user-defined custom tags and controls tag rendering styles and triggers Python events through js. |
+| |
+
+**CustomComponent definition is as follows:**
+
+```python
+class CustomComponentDict(TypedDict):
+ props: Optional[List[str]]
+ template: Optional[str]
+ js: Optional[str]
+```
+
+### Built-in Custom Tags
+
+-
select-box
+-
accordion
+
+### Event Listeners
+
+| Event | Description |
+| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `mgr.Markdown.custom(fn, ···)` | Triggered when a custom tag event occurs. EventData is:
- index: The index tuple of the current message ([message index, user group(index 0) or bot group(index 1), user/bot group index]).
- tag: The current tag that triggered the event.
- tag_index: The index of the current triggered tag, re-calculated based on the message’s index tuple.
- value: The custom value passed in. |
diff --git a/components/Markdown/app.py b/components/Markdown/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e59b2548c878772ff9c32e0308b393caf24ed2e
--- /dev/null
+++ b/components/Markdown/app.py
@@ -0,0 +1,20 @@
+import os
+
+from components.Docs import Docs
+
+
+def resolve(relative_path: str):
+ return os.path.join(os.path.dirname(__file__), relative_path)
+
+
+docs = Docs(
+ __file__,
+ markdown_files=(["README.md", "README-zh_CN.md"] + [
+        f"custom_tags/{filename}"
+ for filename in os.listdir(resolve('custom_tags'))
+ if filename.endswith(".md")
+ ]),
+)
+
+if __name__ == "__main__":
+ docs.render().queue().launch()
diff --git a/components/Markdown/custom_tags/accordion-zh_CN.md b/components/Markdown/custom_tags/accordion-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..6a62712515e4dc14cdfd17865deb80811fa15abb
--- /dev/null
+++ b/components/Markdown/custom_tags/accordion-zh_CN.md
@@ -0,0 +1,23 @@
+# accordion
+
+在 markdown 文本中添加手风琴效果。
+
+## 如何使用
+
+### 基本使用
+
+
+
+### 使用 accordion-title 标记
+
+使用`::accordion-title[content]`的形式可以在标题输入 markdown 文本。
+
+
+
+## API 及参数列表
+
+### props
+
+| 属性 | 类型 | 默认值 | 描述 |
+| ----- | ------ | ------ | ------------ |
+| title | string | | 手风琴的标题 |
diff --git a/components/Markdown/custom_tags/accordion.md b/components/Markdown/custom_tags/accordion.md
new file mode 100644
index 0000000000000000000000000000000000000000..66f1d1e9ea8505914a077d9dfce3ca8b577eace5
--- /dev/null
+++ b/components/Markdown/custom_tags/accordion.md
@@ -0,0 +1,22 @@
+# accordion
+
+Add an accordion effect to markdown text.
+
+## How to Use
+
+### Basic Usage
+
+
+
+### Using the accordion-title Marker
+
+Using the form `::accordion-title[content]`, you can input markdown text into the title.
+
+
+## API and Parameter List
+
+### props
+
+| Attribute | Type | Default Value | Description |
+| --------- | ------ | ------------- | -------------------------- |
+| title | string | | The title of the accordion |
diff --git a/components/Markdown/custom_tags/select-box-zh_CN.md b/components/Markdown/custom_tags/select-box-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..c309287508232e756d9dd039d9b49dadec959f53
--- /dev/null
+++ b/components/Markdown/custom_tags/select-box-zh_CN.md
@@ -0,0 +1,45 @@
+# select-box
+
+在 markdown 文本中添加选择交互框。
+
+## 如何使用
+
+### 基本使用
+
+
+
+### Card 样式
+
+
+
+### Card 自适应内部元素宽度
+
+
+
+### 监听 Python 事件
+
+
+
+## API 及参数列表
+
+### value
+
+custom 事件中 custom_data value 对应值, 返回值为用户 options 传入的对应 value ,如果type="checkbox",则返回一个 list。
+
+### props
+
+| 属性 | 类型 | 默认值 | 描述 |
+| ------------ | ------------------------------------------------------------------------------------------- | ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| type | 'checkbox' \| 'radio' | 'radio' | 选择框类型,'radio' 为单选框、'checkbox'为多选框。 |
+| disabled | boolean | | 禁用选择,通常在需要读取历史信息二次渲染时会用到。 |
+| value | string | | 默认选中值,通常适用于`type="checkbox"`时提前为用户选择部分选项和设置`disabled`后的默认值渲染。 |
+| direction | 'horizontal' \| 'vertical' | 'horizontal' | 横向或竖向排列选择框 |
+| shape | 'card' \| 'default' | 'default' | 选择框样式 |
+| options | (string\| { label?: string, value?: string, imgSrc?: string})\[\] | | 为用户提供的选项值,每一项可以为 string 或 object。 当值为 object 时可以接收更多自定义值,其中imgSrc只有当shape="card"时才生效。 |
+| select-once | boolean | false | 是否只允许用户选择一次 |
+| submit-text | string | | 提交按钮的展示值,当该属性有值时,会展示提交按钮,此时用户只有点击提交按钮后才会触发选择事件。 |
+| columns | number \| { xs?: number, sm?: number, md?: number, lg?: number, xl?: number, xxl?: number } | { xs: 1, sm: 2, md: 2, lg: 4} | 当shape="card"时才生效。每一行选项占用列数,值的范围为1 - 24,建议此项取值可以被 24 整除,否则可能列数会不符合预期。 当此项传入值为对象时,可以响应式控制每一行渲染列数,响应阈值如下:
- xs:屏幕 < 576px
- sm:屏幕 ≥ 576px
- md:屏幕 ≥ 768px
- lg:屏幕 ≥ 992px
- xl:屏幕 ≥ 1200px
- xxl:屏幕 ≥ 1600px 当direction为vertical时此配置不生效。 |
+| item-width | string | | 当shape="card"时才生效。每个选项的宽度,如:'auto'、'100px',默认使用 columns 自动分配的宽度。 |
+| item-height | string | | 当shape="card"时才生效。每个选项的高度,默认自适应元素高度。 |
+| img-height | string | '160px' | 当shape="card"时才生效。每个选项中图片的高度。 |
+| equal-height | boolean | false | 当shape="card"时才生效。是否每一行的选项高度都相等,会使用高度最高的选项。 |
diff --git a/components/Markdown/custom_tags/select-box.md b/components/Markdown/custom_tags/select-box.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb2b210a291d29f0876f68d4af9c468014b3f2cb
--- /dev/null
+++ b/components/Markdown/custom_tags/select-box.md
@@ -0,0 +1,45 @@
+# select-box
+
+Add a selection interaction box to markdown text.
+
+## How to Use
+
+### Basic Usage
+
+
+
+### Card Style
+
+
+
+### Card Adapts to Width of Inner Elements
+
+
+
+### Listening to Python Events
+
+
+
+## API and Parameter List
+
+### value
+
+The corresponding value of custom_data value in the custom event; returns the user's inputted value for options. If type="checkbox", it returns a list.
+
+### props
+
+| Attribute | Type | Default Value | Description |
+| ------------ | ------------------------------------------------------------------------------------------- | ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| type | 'checkbox' \| 'radio' | 'radio' | The type of selection box: 'radio' for radio buttons, 'checkbox' for checkboxes. |
+| disabled | boolean | | Disable the selection, usually used when reading historical information for re-rendering. |
+| value | string | | The default selected value, usually used to pre-select some options for the user when `type="checkbox"` and to set the default value for rendering after setting `disabled`. |
+| direction | 'horizontal' \| 'vertical' | 'horizontal' | Arrange selection boxes horizontally or vertically. |
+| shape | 'card' \| 'default' | 'default' | The style of the selection box. |
+| options | (string \| { label?: string, value?: string, imgSrc?: string })[] | | The options provided to the user, each item can be a string or an object. When the value is an object, it can accept more custom values, where imgSrc only takes effect when shape="card". |
+| select-once | boolean | false | Whether to allow the user to make a selection only once. |
+| submit-text | string | | The display value of the submit button. When this attribute has a value, the submit button will be displayed, and the selection event will only be triggered after the user clicks the submit button. |
+| columns | number \| { xs?: number, sm?: number, md?: number, lg?: number, xl?: number, xxl?: number } | { xs: 1, sm: 2, md: 2, lg: 4 } | Effective only when shape="card". The number of columns each row of options occupies, the value range is 1 - 24, it is recommended that this value is divisible by 24, otherwise, the number of columns may not meet expectations. When this item is passed as an object, it can responsively control the number of rendered columns per row, responsive thresholds are as follows:
- xs: screen < 576px
- sm: screen ≥ 576px
- md: screen ≥ 768px
- lg: screen ≥ 992px
- xl: screen ≥ 1200px
- xxl: screen ≥ 1600px This configuration does not take effect when direction is vertical. |
+| item-width | string | | Effective only when shape="card". The width of each option, such as: 'auto', '100px', the default uses the width automatically allocated by columns. |
+| item-height | string | | Effective only when shape="card". The height of each option, defaults to adapt to the height of the element. |
+| img-height | string | '160px' | Effective only when shape="card". The height of the images within each option. |
+| equal-height | boolean | false | Effective only when shape="card". Whether the height of the options in each row should be equal, using the height of the tallest option. |
diff --git a/components/Markdown/demos/accordion.py b/components/Markdown/demos/accordion.py
new file mode 100644
index 0000000000000000000000000000000000000000..924b354830adb16e90c86d49003057c0fa3aa10a
--- /dev/null
+++ b/components/Markdown/demos/accordion.py
@@ -0,0 +1,28 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown("""
+
+
+```json
+{"text": "glorious weather", "resolution": "1024*1024"}
+```
+
+
+
+Use `::accordion-title` to support markdown:
+
+
+
+::accordion-title[Using `tool`]
+
+```json
+{"text": "glorious weather", "resolution": "1024*1024"}
+```
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/basic.py b/components/Markdown/demos/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c423e26c64c6e003fdb4efce0284cc41d5140a2
--- /dev/null
+++ b/components/Markdown/demos/basic.py
@@ -0,0 +1,11 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ "This _example_ was **written** in [Markdown](https://en.wikipedia.org/wiki/Markdown)\n"
+ )
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom-tag.py b/components/Markdown/demos/custom-tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..530e7027d182e9b76026e815c60723c073ef6b7c
--- /dev/null
+++ b/components/Markdown/demos/custom-tag.py
@@ -0,0 +1,21 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ """
+custom tag:
+""",
+ custom_components={
+ # Key is the tag name
+ "custom-tag": {
+ # The tag props.
+ "props": ["value"],
+                # The tag template, use `{prop}` as placeholder.
+ "template": "
{value}
"
+ }
+ })
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom-tag2.py b/components/Markdown/demos/custom-tag2.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbe6d6c596564ba253742db982585d2643fd0095
--- /dev/null
+++ b/components/Markdown/demos/custom-tag2.py
@@ -0,0 +1,38 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ """
+custom tag:
+""",
+ custom_components={
+ "custom-tag": {
+ "props": ["value"],
+ "template":
+ "
",
+ # The `js` property should be a string containing a JavaScript Function.
+ "js":
+ """
+(props, cc, { el, onMount }) => {
+ // `onMount` will be called after the template rendered
+ onMount(() => {
+ // `el` is the container element
+ console.log(el)
+ })
+ console.log(props.children) // By default, `props` will be passed a property named `children`, which can get the content in the tag, such as 'xx' in '
xx'.
+
+ // The return value will be merged with `props` and passed to the template.
+ return {
+ value: 'Click Me: ' + props.value,
+ onClick: () => {
+ alert('hello')
+ }
+ }
+}"""
+ }
+ })
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom-tag3.py b/components/Markdown/demos/custom-tag3.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dc5f324afd744546d7c09221115ec468b2b1b9a
--- /dev/null
+++ b/components/Markdown/demos/custom-tag3.py
@@ -0,0 +1,31 @@
+import json
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+options = ["a", "b", "c"]
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+with open(resolve_assets("./custom_components/custom_select.js"), 'r') as f:
+ custom_select_js = f.read()
+
+with gr.Blocks() as demo:
+ mgr.Markdown(value=f"""
+custom tag:
+""",
+ custom_components={
+ "custom-select": {
+ "props": ["options"],
+ "js": custom_select_js,
+ }
+ })
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom-tag4.py b/components/Markdown/demos/custom-tag4.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6b8f64858e828dfda8dadd432ba6fe53cd31679
--- /dev/null
+++ b/components/Markdown/demos/custom-tag4.py
@@ -0,0 +1,38 @@
+import json
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+options = ["a", "b", "c"]
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+with open(resolve_assets("./custom_components/custom_select.js"), 'r') as f:
+ custom_select_js = f.read()
+
+
+def fn(data: gr.EventData):
+ # custom {'index': [0, 1, 0], 'tag': 'custom-select', 'tag_index': 0, 'value': 'option A'}
+ print("custom value", data._data)
+
+
+with gr.Blocks() as demo:
+ md = mgr.Markdown(value=f"""
+custom tag:
+""",
+ custom_components={
+ "custom-select": {
+ "props": ["options"],
+ "js": custom_select_js,
+ }
+ })
+ md.custom(fn=fn)
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/accordion/accordion-title.py b/components/Markdown/demos/custom_tags/accordion/accordion-title.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b692f010c04d903c794f985541e23b884153345
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/accordion/accordion-title.py
@@ -0,0 +1,19 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown("""
+
+
+::accordion-title[Using `tool`]
+
+```json
+{"text": "glorious weather", "resolution": "1024*1024"}
+```
+
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/accordion/basic.py b/components/Markdown/demos/custom_tags/accordion/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..724b9f5ca0db2c40b91064129956ad2500dbae9c
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/accordion/basic.py
@@ -0,0 +1,17 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+with gr.Blocks() as demo:
+ mgr.Markdown("""
+
+
+```json
+{"text": "glorious weather", "resolution": "1024*1024"}
+```
+
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/select-box/basic.py b/components/Markdown/demos/custom_tags/select-box/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb03c2fe925703f09c6f97b9cd20d12323b96ec4
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/select-box/basic.py
@@ -0,0 +1,21 @@
+import json
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+options = [{"label": "A", "value": "a"}, "b", "c"]
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ f"""Single Select:
+
+Multiple Select:
+
+Vertical Direction:
+
+
+""", )
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/select-box/card_shape.py b/components/Markdown/demos/custom_tags/select-box/card_shape.py
new file mode 100644
index 0000000000000000000000000000000000000000..a28ce52c923be16d6aced2636d7c8844d8f0042d
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/select-box/card_shape.py
@@ -0,0 +1,32 @@
+import json
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+# Card shape supports setting `imgSrc` as the cover.
+options = [{
+ "label":
+ "A",
+ "imgSrc":
+ os.path.join(os.path.dirname(__file__), '../../../resources/screen.jpeg'),
+ "value":
+ "a"
+}, "b", "c", "d"]
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ f"""
+
+Custom Columns:
+
+
+
+Vertical Direction:
+
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/select-box/card_shape_width_auto.py b/components/Markdown/demos/custom_tags/select-box/card_shape_width_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb97a7c14b56237a2ea32056c94c00c3cf927bae
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/select-box/card_shape_width_auto.py
@@ -0,0 +1,26 @@
+import json
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+# Card shape supports setting `imgSrc` as the cover.
+options = [{
+ "label":
+ "A",
+ "imgSrc":
+ os.path.join(os.path.dirname(__file__), '../../../resources/screen.jpeg'),
+ "value":
+ "a"
+}, "b", "c", "d"]
+
+with gr.Blocks() as demo:
+ mgr.Markdown(
+ # item-width="auto"
+ f"""
+
+""", )
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/custom_tags/select-box/python_events.py b/components/Markdown/demos/custom_tags/select-box/python_events.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd585e7ac9f17ed94553e518e1ca123e76607763
--- /dev/null
+++ b/components/Markdown/demos/custom_tags/select-box/python_events.py
@@ -0,0 +1,24 @@
+import json
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+options = [{"label": "A", "value": "a"}, "b", "c"]
+
+
+def fn(data: gr.EventData):
+ custom_data = data._data
+ if (custom_data["tag"] == "select-box"):
+ print(custom_data["value"]
+ ) # 'a' or 'b' or 'c', the value set in the options.
+
+
+with gr.Blocks() as demo:
+ md = mgr.Markdown(
+ f"
"
+ )
+ md.custom(fn=fn)
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/multimodal.py b/components/Markdown/demos/multimodal.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a4f80b49131a36e49a3bcf6f2c6dd0223dbc2d8
--- /dev/null
+++ b/components/Markdown/demos/multimodal.py
@@ -0,0 +1,31 @@
+import os
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
+def resolve_assets(relative_path):
+ return os.path.join(os.path.dirname(__file__), "../resources",
+ relative_path)
+
+
+with gr.Blocks() as demo:
+ mgr.Markdown(f"""
+Image
+
+![image]({resolve_assets("bot.jpeg")})
+
+
+
+Video
+
+
+
+Audio
+
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/demos/select-box.py b/components/Markdown/demos/select-box.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b69862bdf1d79dc6d36646449c0cf0c24c61a2f
--- /dev/null
+++ b/components/Markdown/demos/select-box.py
@@ -0,0 +1,32 @@
+import json
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+# `label` will display on the page, and `value` is the actual selected value.
+options = [{"label": "A", "value": "a"}, "b", "c"]
+
+with gr.Blocks() as demo:
+ mgr.Markdown(f"""
+Single Select:
+
+Multiple Select:
+
+Vertical Direction:
+
+
+
+Card Shape:
+
+
+
+
+
+
+
+
+""")
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/Markdown/resources/audio.wav b/components/Markdown/resources/audio.wav
new file mode 100644
index 0000000000000000000000000000000000000000..105190ad88e2e177540361de340e54feb1587f3c
Binary files /dev/null and b/components/Markdown/resources/audio.wav differ
diff --git a/components/Markdown/resources/bot.jpeg b/components/Markdown/resources/bot.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5fde8cc45f61b677c0581e6889b11e269c35be08
Binary files /dev/null and b/components/Markdown/resources/bot.jpeg differ
diff --git a/components/Markdown/resources/custom_components/custom_select.js b/components/Markdown/resources/custom_components/custom_select.js
new file mode 100644
index 0000000000000000000000000000000000000000..ca7e2ae712ca53eff775d1f5b22d9c004720db0b
--- /dev/null
+++ b/components/Markdown/resources/custom_components/custom_select.js
@@ -0,0 +1,26 @@
+(props, cc, { el, onMount }) => {
+ const options = JSON.parse(props.options);
+ el.innerHTML = `
+ ${options
+ .map((option) => {
+ return `
+
+
`;
+ })
+ .join('')}
+ `;
+ onMount(() => {
+ const inputs = Array.from(el.getElementsByTagName('input'));
+ Array.from(el.getElementsByTagName('label')).forEach((label, i) => {
+ label.addEventListener('click', () => {
+ inputs.forEach((input) => {
+ input.checked = false;
+ });
+ const input = label.getElementsByTagName('input')[0];
+ input.checked = true;
+ // Use cc.dispatch to trigger events.
+ cc.dispatch(options[i]);
+ });
+ });
+ });
+};
diff --git a/components/Markdown/resources/dog.mp4 b/components/Markdown/resources/dog.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..062b9c81317de43f392c56e9e03444bf8cc31d51
--- /dev/null
+++ b/components/Markdown/resources/dog.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39d086ce29e48cf76e5042d2f3f0611ee46575f70fa3dc0c40dd4cfffde3d933
+size 8626383
diff --git a/components/Markdown/resources/screen.jpeg b/components/Markdown/resources/screen.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..574735acb117e86c5c0850e2b5489b8f8efa20cc
Binary files /dev/null and b/components/Markdown/resources/screen.jpeg differ
diff --git a/components/Markdown/resources/user.jpeg b/components/Markdown/resources/user.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..536948b6bd19cb0b49c44b74e2790198301520e5
Binary files /dev/null and b/components/Markdown/resources/user.jpeg differ
diff --git a/components/MultimodalInput/README-zh_CN.md b/components/MultimodalInput/README-zh_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..d98029e5adc6f54f066f900211daf7fb1e8e5e15
--- /dev/null
+++ b/components/MultimodalInput/README-zh_CN.md
@@ -0,0 +1,50 @@
+# MultimodalInput
+
+多模态输入框,支持上传文件、录音、照相等功能。
+
+- 支持文本输入+文件上传共同提交
+- 支持文件上传时的图片、音频预览
+- 提交内容作为 Chatbot 输入多模态内容作为用户输入问题自动匹配
+- 支持用户录音和拍照
+
+## 如何使用
+
+### 基本使用
+
+
+
+### 与 Chatbot 配合使用
+
+
+
+### 配置上传/提交按钮
+
+
+
+### 允许用户录音或拍照
+
+
+
+## API 及参数列表
+
+以下 API 均为在原有 gradio Textbox 外的额外拓展参数。
+
+### value
+
+接口定义:
+
+```python
+class MultimodalInputData(GradioModel):
+ files: List[Union[FileData, str]] = []
+ text: str
+```
+
+### props
+
+| 属性 | 类型 | 默认值 | 描述 |
+| ------------------- | -------------------------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------- |
+| sources | list\[Literal\['upload', 'microphone','webcam'\]\] | \['upload'\] | 上传文件的类型列表。 "upload"会提供上传文件按钮。 "microphone"支持用户录音输入。 "webcam"支持用户照相生成图片或视频 |
+| webcam_props | dict | None | webcam 组件属性,目前支持传入mirror_webcam(bool)、include_audio(bool) |
+| upload_button_props | dict | None | 上传文件按钮属性,同 gradio UploadButton |
+| submit_button_props | dict | None | 提交按钮属性,同 gradio Button |
+| file_preview_props | dict | None | 文件预览组件属性,目前支持传入 height (int) |
diff --git a/components/MultimodalInput/README.md b/components/MultimodalInput/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b3e1441b24f985bd555561f1384680efcaae6ba5
--- /dev/null
+++ b/components/MultimodalInput/README.md
@@ -0,0 +1,50 @@
+# MultimodalInput
+
+A multimodal input field that supports uploading files, recording audio, taking photos, etc.
+
+- Supports text input and file upload for joint submission
+- Supports image and audio previews during file upload
+- Submissions serve as Chatbot input, matching multimodal content as user input questions automatically
+- Supports user recording and photography
+
+## How to Use
+
+### Basic Usage
+
+
+
+### Using with Chatbot
+
+
+
+### Configuring Upload/Submit Buttons
+
+
+
+### Allowing User Recording or Photography
+
+
+
+## API and Parameter List
+
+The following APIs are additional expanded parameters beyond the original gradio Textbox.
+
+### value
+
+Interface definition:
+
+```python
+class MultimodalInputData(GradioModel):
+ files: List[Union[FileData, str]] = []
+ text: str
+```
+
+### props
+
+| Attribute | Type | Default Value | Description |
+| ------------------- | ----------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| sources | list[Literal['upload', 'microphone', 'webcam']] | ['upload'] | A list of types for uploading files. "upload" provides an upload file button. "microphone" supports user audio input. "webcam" supports user photography to generate images or videos. |
+| webcam_props | dict | None | webcam component properties, currently supports passing mirror_webcam(bool), include_audio(bool) |
+| upload_button_props | dict | None | Upload file button properties, same as gradio UploadButton |
+| submit_button_props | dict | None | Submit button properties, same as gradio Button |
+| file_preview_props | dict | None | File preview component properties, currently supports passing height (int) |
diff --git a/components/MultimodalInput/app.py b/components/MultimodalInput/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..77372ef291165f2cf77a8944f3f8b18773972190
--- /dev/null
+++ b/components/MultimodalInput/app.py
@@ -0,0 +1,6 @@
+from components.Docs import Docs
+
+docs = Docs(__file__)
+
+if __name__ == "__main__":
+ docs.render().queue().launch()
diff --git a/components/MultimodalInput/demos/basic.py b/components/MultimodalInput/demos/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..67d7470d29e8d017e88884a9b2783b529f1930d8
--- /dev/null
+++ b/components/MultimodalInput/demos/basic.py
@@ -0,0 +1,16 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
+def fn(value):
+    # value includes `text` and `files`
+ print(value.text, value.files)
+
+
+with gr.Blocks() as demo:
+ input = mgr.MultimodalInput()
+ input.change(fn=fn, inputs=[input])
+
+if __name__ == "__main__":
+ demo.queue().launch()
diff --git a/components/MultimodalInput/demos/config_buttons.py b/components/MultimodalInput/demos/config_buttons.py
new file mode 100644
index 0000000000000000000000000000000000000000..493c59a64404a565102c57eb373d851a5702c04b
--- /dev/null
+++ b/components/MultimodalInput/demos/config_buttons.py
@@ -0,0 +1,16 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
def fn(value):
    """Change-event handler: log the submitted text and attached files."""
    print(value.text, value.files)
+
+
with gr.Blocks() as demo:
    # `user_input` instead of `input` so the Python builtin is not shadowed.
    user_input = mgr.MultimodalInput(
        # Same options as gradio's UploadButton.
        upload_button_props=dict(variant="primary"),
        # Hide the submit button entirely.
        submit_button_props=dict(visible=False))
    user_input.change(fn=fn, inputs=[user_input])

if __name__ == "__main__":
    demo.queue().launch()
diff --git a/components/MultimodalInput/demos/upload_sources.py b/components/MultimodalInput/demos/upload_sources.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e9b5476fdce7d83ee4b9e2f47de91f18a7e42bd
--- /dev/null
+++ b/components/MultimodalInput/demos/upload_sources.py
@@ -0,0 +1,15 @@
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
def fn(value):
    """Change-event handler: log the submitted text and attached files."""
    print(value.text, value.files)
+
+
with gr.Blocks() as demo:
    # `user_input` instead of `input` so the Python builtin is not shadowed.
    # Enable all three upload sources (file picker, microphone, webcam).
    user_input = mgr.MultimodalInput(sources=["upload", "microphone", "webcam"])
    user_input.change(fn=fn, inputs=[user_input])

if __name__ == "__main__":
    demo.queue().launch()
diff --git a/components/MultimodalInput/demos/with_chatbot.py b/components/MultimodalInput/demos/with_chatbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ad7784a434bb224edf30b0dcd4e668f43e7de4e
--- /dev/null
+++ b/components/MultimodalInput/demos/with_chatbot.py
@@ -0,0 +1,36 @@
+import time
+
+import gradio as gr
+
+import modelscope_studio as mgr
+
+
def fn(input, chatbot):
    """Submit handler: append the user turn, then stream a bot reply.

    NOTE(review): the `input` parameter shadows the Python builtin of the
    same name inside this function body.
    """
    # Append the user turn; the bot slot (second element) is pending.
    chatbot.append([{
        "text": input.text,
        "files": input.files,
    }, None])
    # First yield: disable the input while the bot is "responding".
    yield {
        user_input: mgr.MultimodalInput(interactive=False),
        user_chatbot: chatbot
    }
    # Simulated model latency.
    time.sleep(2)
    chatbot[-1][1] = {"text": "Hello!"}
    yield {user_chatbot: chatbot}
+
+
# Triggered when the typewriter is ending.
def flushed():
    """Re-enable the multimodal input once the typewriter effect completes."""
    return mgr.MultimodalInput(interactive=True)
+
+
with gr.Blocks() as demo:
    user_chatbot = mgr.Chatbot()
    user_input = mgr.MultimodalInput()
    # Submit updates both the input (to disable it) and the chatbot history.
    user_input.submit(fn=fn,
                      inputs=[user_input, user_chatbot],
                      outputs=[user_input, user_chatbot])
    # `flushed` fires when the chatbot's typewriter animation finishes.
    user_chatbot.flushed(fn=flushed, outputs=[user_input])

if __name__ == "__main__":
    demo.queue().launch()
diff --git a/components/parse_markdown.py b/components/parse_markdown.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f2bc78d3b0f241ecb930d43e6f787c8e6a14413
--- /dev/null
+++ b/components/parse_markdown.py
@@ -0,0 +1,79 @@
+from html.parser import HTMLParser
+
+
def default_read_file(path):
    """Return the full text content of *path*.

    Fallback reader for ``<file src="...">`` tags when the caller does not
    supply a custom ``read_file`` callable.
    """
    # Explicit encoding so results do not depend on the platform locale.
    with open(path, "r", encoding="utf-8") as f:
        return f.read()
+
+
+enable_tags = ["demo", "demo-prefix", "demo-suffix", "file"]
+
+
class MarkdownParser(HTMLParser):
    """Split a markdown document into text and demo segments.

    ``get_value()`` returns a list of dicts:

    * ``{"type": "text", "value": ...}`` — plain markdown text, including
      any tags outside ``enable_tags`` reproduced verbatim;
    * ``{"type": "demo", "name": ..., "prefix": "", "suffix": ""}`` — one per
      ``<demo name="...">`` tag; nested ``<demo-prefix>``/``<demo-suffix>``
      content accumulates into ``prefix``/``suffix``.

    ``<file src="...">`` tags are replaced by the referenced file's content
    (read with ``read_file``, defaulting to ``default_read_file``).
    """

    def __init__(self, read_file=None):
        super().__init__()
        # Seed with an empty text segment so handle_data can always append.
        self.value = [{"type": "text", "value": ""}]
        self.tag_stack = []
        self.read_file = read_file or default_read_file
        self.current_tag = None

    def get_value(self):
        """Return the accumulated list of parsed segments."""
        return self.value

    def handle_data(self, data: str) -> None:
        # Route raw text into the right bucket for the current context.
        if self.value[-1]["type"] == "text":
            self.value[-1]["value"] += data
        elif self.current_tag is None:
            self.value.append({"type": "text", "value": data})
        elif self.current_tag == "demo-prefix":
            self.value[-1]["prefix"] += data
        elif self.current_tag == "demo-suffix":
            self.value[-1]["suffix"] += data

    def handle_startendtag(self, tag: str, attrs) -> None:
        # Self-closing unknown tags are kept verbatim; self-closing enabled
        # tags carry no content and are ignored.
        if tag not in enable_tags:
            self.handle_data(self.get_starttag_text())
            return

    def handle_starttag(self, tag: str, attrs) -> None:
        if (tag not in enable_tags):
            # Unknown tag (e.g. raw HTML in the markdown): keep original text.
            self.handle_data(self.get_starttag_text())
            return
        if tag == "demo":
            self.value.append({
                "type": "demo",
                "name": dict(attrs)["name"],
                "prefix": "",
                "suffix": ""
            })
        elif tag == "file":
            # Inline the referenced file's content into the current segment.
            content = self.read_file(dict(attrs)["src"])
            if self.value[-1]["type"] == "text":
                self.value[-1]["value"] += content
            elif self.current_tag == "demo-prefix":
                self.value[-1]["prefix"] += content
            elif self.current_tag == "demo-suffix":
                self.value[-1]["suffix"] += content
        self.current_tag = tag
        self.tag_stack.append(self.current_tag)

    def handle_endtag(self, tag: str) -> None:
        if (tag not in enable_tags):
            # BUG FIX: reconstruct the full closing tag ("</tag>"); the
            # previous f"{tag}>" dropped the "</" and corrupted pass-through
            # HTML (e.g. "</b>" became "b>").
            self.handle_data(f"</{tag}>")
            return
        # Pop the closed enabled tag and restore the enclosing context.
        if (len(self.tag_stack) > 0):
            self.tag_stack.pop()
            if (len(self.tag_stack) > 0):
                self.current_tag = self.tag_stack[-1]
            else:
                self.current_tag = None
        else:
            self.current_tag = None
+
+
def parse_markdown(markdown: str, read_file=None):
    """Parse *markdown* into a list of text/demo segment dicts.

    ``read_file`` optionally overrides how ``<file src="...">`` content is
    loaded (defaults to reading from disk).
    """
    md_parser = MarkdownParser(read_file=read_file)
    md_parser.feed(markdown)
    return md_parser.get_value()
diff --git a/components/tab-link.js b/components/tab-link.js
new file mode 100644
index 0000000000000000000000000000000000000000..bcd9143dcafadafedfb4b711e704a6ca7e32b74c
--- /dev/null
+++ b/components/tab-link.js
@@ -0,0 +1,15 @@
// Custom renderer for the <tab-link> markdown tag: turns the element into a
// clickable, link-styled inline span that dispatches the target tab info.
(props, cc, { onMount, el }) => {
  onMount(() => {
    el.addEventListener('click', () => {
      // Notify the host component which tab / component-tab to switch to.
      cc.dispatch({
        tab: props.tab,
        component_tab: props['component-tab'],
      });
    });
  });
  // Render the tag's first child as the visible link text.
  const children = props.children[0].value;
  el.innerHTML = `${children}`;
  el.style.display = 'inline-block';
  el.style.cursor = 'pointer';
  el.style.color = 'var(--link-text-color)';
};
diff --git a/modelscope_studio-0.0.7-py3-none-any.whl b/modelscope_studio-0.0.7-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..3f0d1e40125707f2286ede45b9ff9b4da54478d7
--- /dev/null
+++ b/modelscope_studio-0.0.7-py3-none-any.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73e59525a7267d6aa81889e36e17e93ec8e0cc15948cdf5dc74b1bc2be124477
+size 6129839
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b18ffc8ddc627245f587f95e04e2c0adafadcede
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+modelscope_studio
+modelscope_studio-0.0.7-py3-none-any.whl
diff --git a/src/pyproject.toml b/src/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..33db92d6a885158f06952c6303101b542db2f176
--- /dev/null
+++ b/src/pyproject.toml
@@ -0,0 +1,54 @@
+[build-system]
+requires = [
+ "hatchling",
+ "hatch-requirements-txt",
+ "hatch-fancy-pypi-readme>=22.5.0",
+]
+build-backend = "hatchling.build"
+
+[project]
+name = "modelscope_studio"
+version = "0.0.7"
+description = "A set of extension components, including components for conversational input and display in multimodal scenarios, as well as more components for vertical scenarios."
+readme = "README.md"
+license = "Apache-2.0"
+requires-python = ">=3.8"
+authors = [{ name = "YOUR NAME", email = "YOUREMAIL@domain.com" }]
+keywords = [
+ "gradio-custom-component",
+ "modelscope-studio",
+ "gradio-template-Chatbot",
+]
+# Add dependencies here
+dependencies = ["gradio>=4.0,<5.0"]
+classifiers = [
+ 'Development Status :: 3 - Alpha',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
+ 'Topic :: Scientific/Engineering :: Visualization',
+]
+
+[project.optional-dependencies]
+dev = ["build", "twine"]
+
+[tool.hatch.build]
+artifacts = [
+ "*.pyi",
+ "backend/modelscope_studio/components/Chatbot/templates",
+ "backend/modelscope_studio/components/MultimodalInput/templates",
+ "backend/modelscope_studio/components/Markdown/templates",
+]
+
+[tool.hatch.build.targets.sdist]
+include = ["/backend/modelscope_studio"]
+
+[tool.hatch.build.targets.wheel]
+packages = ["/backend/modelscope_studio"]