Update app.py
app.py CHANGED
@@ -4,8 +4,8 @@ import torch
 from gtts import gTTS
 
 # Load the Qwen model
-tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-…
-model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-…
+tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
+model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
 
 # Function that generates text with the model
 def generate_text(prompt, max_length, temperature, top_p):
@@ -37,7 +37,7 @@ def generate_and_speak(prompt, max_length, temperature, top_p):
 with gr.Blocks() as demo:
     # Input fields (prompt, length, temperature, top-p)
     prompt = gr.Textbox(label="プロンプトを入力してください")
-    max_length = gr.Slider(10, 1000, value=…
+    max_length = gr.Slider(10, 1000, value=400, step=10, label="最大長")
     temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="生成温度")
     top_p = gr.Slider(0.0, 1.0, value=0.95, step=0.05, label="Top-p")
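
For context, only the lines quoted in the diff above are confirmed; the removed lines are truncated in the diff view and are left as-is. The following is a minimal, hypothetical sketch of how generate_text, generate_and_speak, and the Gradio wiring might fit together, assuming standard transformers, gTTS, and gradio APIs. The body of generate_text, the gTTS call, the output components, and the button wiring are assumptions, not the actual contents of app.py.

# Hypothetical sketch of the surrounding app.py; only the lines quoted in the
# diff are confirmed, the rest is an assumption.
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from gtts import gTTS

# Load the Qwen model (confirmed by the diff)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# Generate text with the model (signature confirmed by the diff; body assumed)
def generate_text(prompt, max_length, temperature, top_p):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Generate text, then synthesize speech with gTTS (name confirmed by the hunk
# header; body and language choice are assumptions)
def generate_and_speak(prompt, max_length, temperature, top_p):
    text = generate_text(prompt, max_length, temperature, top_p)
    audio_path = "output.mp3"
    gTTS(text=text, lang="ja").save(audio_path)
    return text, audio_path

with gr.Blocks() as demo:
    # Input fields (prompt, length, temperature, top-p) -- confirmed by the diff
    prompt = gr.Textbox(label="プロンプトを入力してください")
    max_length = gr.Slider(10, 1000, value=400, step=10, label="最大長")
    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="生成温度")
    top_p = gr.Slider(0.0, 1.0, value=0.95, step=0.05, label="Top-p")

    # Output components and wiring (assumed, not shown in the diff)
    text_out = gr.Textbox(label="Generated text")
    audio_out = gr.Audio(label="Speech")
    gr.Button("Generate").click(
        generate_and_speak,
        inputs=[prompt, max_length, temperature, top_p],
        outputs=[text_out, audio_out],
    )

demo.launch()

The diff itself only changes two things: the model checkpoint is pinned to Qwen/Qwen2.5-0.5B-Instruct for both the tokenizer and the model, and the max-length slider gets a default of 400 with a step of 10.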