Dec imports
app.py
CHANGED
@@ -1,6 +1,5 @@
-
-import
-
+from gradio import ChatInterface, Textbox, Slider
+from spaces import GPU
 from threading import Thread
 from torch import bfloat16
 from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor, TextIteratorStreamer, AutoProcessor
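The removed lines in this hunk are cut off in the diff view, so their exact content is not visible. Judging from the commit title and from the bare ChatInterface / GPU names used in the hunks below, a plausible (unconfirmed) reading is a switch from module-qualified imports to direct name imports:

# Assumed previous form (the removed lines are truncated in the view above):
#   import gradio as gr
#   import spaces
# which would require module-qualified call sites such as @spaces.GPU and gr.ChatInterface(...).

# Form after this commit: import the names directly, so the code below can refer to
# ChatInterface, Textbox, Slider and GPU without a module prefix.
from gradio import ChatInterface, Textbox, Slider
from spaces import GPU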
@@ -18,7 +17,7 @@ min_pixels = 256 * 28 * 28
 max_pixels = 1280 * 28 * 28
 processor: Qwen2VLProcessor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
 
-@
+@GPU
 def infer(
     messages,
     max_tokens,
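The body of infer is outside this diff, so the following is only a sketch of how a @GPU-decorated streaming generator typically looks with the imports already present in app.py (Thread, TextIteratorStreamer). The model / processor globals, the text-only preprocessing, and the sampling arguments are assumptions, not the repository's actual code:

from threading import Thread
from transformers import TextIteratorStreamer
from spaces import GPU

@GPU  # run this call on a ZeroGPU-allocated device
def infer(messages, max_tokens, temperature, top_p):
    # Assumption: `model` and `processor` are the globals created earlier in app.py,
    # and `messages` is a list of chat-template-compatible role/content dicts.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text], return_tensors="pt").to(model.device)  # image handling omitted

    # Generate in a background thread and stream partial text back to the caller.
    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate, kwargs=dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )).start()

    buffer = ""
    for chunk in streamer:
        buffer += chunk
        yield buffer  # yield the accumulated text so the UI shows a growing reply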
@@ -66,14 +65,14 @@ def respond(
     for response in infer(messages, max_tokens, temperature, top_p):
         yield response
 
-app =
+app = ChatInterface(
     respond,
     type="messages",
     additional_inputs=[
-
-
-
-
+        Textbox(value="You are Softie, a helpful assistant.", label="System prompt"),
+        Slider(minimum=1, maximum=2048, value=512, step=1, label="Max generation length"),
+        Slider(minimum=0.01, maximum=4.0, value=0.75, step=0.01, label="Temperature"),
+        Slider(minimum=0.01, maximum=1.0, value=0.5, step=0.01, label="Top-p (nucleus sampling)"),
     ],
 )
 
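The old `app =` line and the four removed additional_inputs entries are also truncated in the view above (presumably the gr.-prefixed equivalents of the new components). For context, ChatInterface passes each additional_inputs value to the callback as an extra positional argument after the message and the history. The respond below is a sketch of that wiring under an assumed signature, not necessarily the one in app.py:

# Sketch, inside app.py: one parameter per additional_inputs component, in order.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    messages += history  # type="messages" supplies OpenAI-style role/content dicts
    messages.append({"role": "user", "content": message})
    for response in infer(messages, max_tokens, temperature, top_p):
        yield response

if __name__ == "__main__":
    app.launch()  # `app` is the ChatInterface built in the hunk above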
|