tamatwi committed
Commit c74d313 · verified · 1 Parent(s): 546a17c

Update app.py

Files changed (1)
app.py +8 -2
app.py CHANGED
@@ -1,6 +1,6 @@
 from transformers import pipeline
 import gradio as gr
-
+import spaces
 # Initialize the text generation pipeline with optimizations
 pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
 # Define a function to generate text based on user input
@@ -8,12 +8,18 @@ def generate_text(prompt):
     result = pipe(prompt, max_length=50, num_return_sequences=1)
     return result[0]['generated_text']
 
+# Define a function to generate text based on user input
+@spaces.GPU
+def generate_text(prompt):
+    result = pipe(prompt, max_length=50, num_return_sequences=1)
+    return result[0]['generated_text']
+
 # Create a Gradio interface with batching enabled
 iface = gr.Interface(
     fn=generate_text,
     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your prompt here..."),
     outputs="text",
-    title="Text Generation with DiscoPOP-zephyr-7b-gemma",
+    title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
     description="Enter a prompt and the model will generate a continuation of the text.",
     batch=True,
     max_batch_size=4
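
For reference, below is a minimal sketch of app.py as it stands after this commit, reconstructed from the hunks above. The diff is truncated after max_batch_size=4, so the closing parenthesis of gr.Interface(...) and the final iface.launch() call are assumptions, not part of the shown change.

from transformers import pipeline
import gradio as gr
import spaces
# Initialize the text generation pipeline with optimizations
pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
# Define a function to generate text based on user input
def generate_text(prompt):
    result = pipe(prompt, max_length=50, num_return_sequences=1)
    return result[0]['generated_text']

# Define a function to generate text based on user input
# (this second, @spaces.GPU-decorated definition overrides the plain one above)
@spaces.GPU
def generate_text(prompt):
    result = pipe(prompt, max_length=50, num_return_sequences=1)
    return result[0]['generated_text']

# Create a Gradio interface with batching enabled
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
    description="Enter a prompt and the model will generate a continuation of the text.",
    batch=True,
    max_batch_size=4
)               # assumed: the closing parenthesis lies beyond the truncated hunk
iface.launch()  # assumed: a launch call is not shown in the diff but is needed to serve the app

Two caveats on the committed code, noted here rather than changed in the diff: gr.inputs.Textbox is the legacy Gradio namespace and recent Gradio releases expose the component as gr.Textbox instead, and with batch=True Gradio passes a list of prompts to fn and expects a list of outputs back, so generate_text would need to accept and return lists for batching to take effect.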