tamatwi committed on
Commit
d3ff6b3
·
verified ·
1 Parent(s): 7c54fb4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -18
app.py CHANGED
@@ -1,27 +1,54 @@
1
- from transformers import pipeline
2
- import gradio as gr
3
- import spaces
4
  # Initialize the text generation pipeline with optimizations
5
- pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
6
 
7
 
8
  # Define a function to generate text based on user input
9
- @spaces.GPU
10
- def generate_text(prompt):
11
- result = pipe(prompt, max_length=50, num_return_sequences=1)
12
- return result[0]['generated_text']
13
 
14
  # Create a Gradio interface with batching enabled
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  iface = gr.Interface(
16
- fn=generate_text,
17
- inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
18
- outputs=gr.Textbox(label="生成されたテキスト"),
19
- title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
20
- description="Enter a prompt and the model will generate a continuation of the text.",
21
- batch=True,
22
- max_batch_size=4
23
  )
24
 
25
- # Launch the interface
26
- if __name__ == "__main__":
27
- iface.launch()
 
1
+ # from transformers import pipeline
2
+ # import gradio as gr
3
+ # import spaces
4
  # Initialize the text generation pipeline with optimizations
5
+ # pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
6
 
7
 
8
  # Define a function to generate text based on user input
9
+ # @spaces.GPU
10
+ # def generate_text(prompt):
11
+ # result = pipe(prompt, max_length=50, num_return_sequences=1)
12
+ # return result[0]['generated_text']
13
 
14
  # Create a Gradio interface with batching enabled
15
+ # iface = gr.Interface(
16
+ # fn=generate_text,
17
+ # inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
18
+ # outputs=gr.Textbox(label="生成されたテキスト"),
19
+ # title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
20
+ # description="Enter a prompt and the model will generate a continuation of the text.",
21
+ # batch=True,
22
+ # max_batch_size=4
23
+ # )
24
+
25
+ # Launch the interface
26
+ # if __name__ == "__main__":
27
+ # iface.launch()
28
+
29
+
30
+
31
import gradio as gr
from transformers import pipeline, AutoTokenizer

# Japanese LLM used for text generation.
model_name = "SakanaAI/EvoLLM-JP-v1-7B"

# Build the tokenizer and the text-generation pipeline once at startup.
# device=-1 runs inference on CPU; pass device=0 to use the first GPU instead.
tokenizer = AutoTokenizer.from_pretrained(model_name)
generator = pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,
    device=-1,  # CPU; set to 0 for GPU
)
40
+
41
def generate_text(prompt, max_length):
    """Generate a text continuation of *prompt* with the EvoLLM-JP pipeline.

    Args:
        prompt: Input text to continue.
        max_length: Total token budget (prompt + generated tokens), supplied
            by the UI slider.

    Returns:
        The generated text; the pipeline output includes the original prompt.
    """
    # Gradio sliders may deliver floats; the pipeline expects an int budget.
    result = generator(prompt, max_length=int(max_length), num_return_sequences=1)
    return result[0]["generated_text"]
44
+
45
# Gradio UI: prompt textbox + max-length slider -> generated text.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="プロンプト", placeholder="ここに日本語のプロンプトを入力してください"),
        gr.Slider(minimum=10, maximum=200, value=50, step=1, label="最大長"),
    ],
    outputs=gr.Textbox(label="生成されたテキスト"),
    title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
    description="Enter a prompt and the model will generate a continuation of the text.",
)

# Guard the launch so importing this module does not start the web server.
if __name__ == "__main__":
    iface.launch()