research14 committed on
Commit
a845f21
·
1 Parent(s): 74d11d9
Files changed (1) hide show
  1. app.py +6 -15
app.py CHANGED
@@ -1,25 +1,16 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import torch
4
 
5
# Load a pre-trained causal language model and its tokenizer.
# NOTE(review): the original comment said "GPT-3.5", but the checkpoint loaded
# below is EleutherAI's GPT-Neo 2.7B — the comment, not the code, was misleading.
model_name = "EleutherAI/gpt-neo-2.7B"
tokenizer = AutoTokenizer.from_pretrained(model_name)  # downloads/caches weights on first run
model = AutoModelForCausalLM.from_pretrained(model_name)  # causal-LM head used by generate()
9
-
10
def generate_text(input_text, max_length=50):
    """Generate a text continuation of *input_text* with the module-level model.

    Args:
        input_text: Prompt string to continue.
        max_length: Maximum total length, in tokens, of the generated sequence
            (prompt tokens included).

    Returns:
        The decoded generated text as a string, special tokens stripped.
        The prompt itself is included in the returned text, since the model
        output begins with the input IDs.
    """
    # Tokenize the prompt into a PyTorch tensor of token IDs ("pt" = PyTorch).
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Default (greedy) decoding; exactly one sequence is requested.
    output = model.generate(input_ids, max_length=max_length, num_return_sequences=1)
    # Decode the single returned sequence back to text, dropping special tokens.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
15
 
16
# Create a Gradio interface: one text box in, one text box out, wired to the
# text-generation function above.
iface = gr.Interface(
    fn=generate_text,  # Your text generation function
    # NOTE(review): `text=` is not a gr.Textbox constructor parameter in current
    # Gradio releases — `placeholder=` is likely what was intended; confirm
    # against the pinned gradio version before relying on this line.
    inputs=gr.Textbox(text="Enter text here..."),  # Text input field
    outputs=gr.Textbox(),  # Display generated text
    live=True  # Real-time updates: re-runs fn as the user types
)

# Launch the interface (starts the local web server and blocks)
iface.launch()
 
1
  import gradio as gr
 
 
2
 
3
def echo_text(input_text):
    """Return the given text unchanged (identity function for the demo UI)."""
    echoed = input_text
    return echoed
 
 
 
 
 
 
 
6
 
7
# Create a Gradio interface: one text box in, one text box out, wired to the
# echo function defined above.
iface = gr.Interface(
    fn=echo_text,  # Echo text function
    # Fix: `text=` is not a gr.Textbox parameter and raises TypeError at
    # construction; `placeholder=` is the keyword for the hint text shown
    # in an empty input box.
    inputs=gr.Textbox(placeholder="Enter text here..."),  # Text input field
    outputs=gr.Textbox(),  # Display echoed text
    live=True,  # Real-time updates: re-run fn on every input change
)

# Launch the Gradio interface (starts the local web server and blocks)
iface.launch()