anirudh-sub committed on
Commit 6419b2f · 1 Parent(s): 9dfa995

Got chatgpt to write me some code

Files changed (1)
  1. app.py +17 -9
app.py CHANGED
@@ -1,8 +1,6 @@
 import gradio as gr
 import transformers
 from transformers import AutoTokenizer
-import torch
-from diffusers.utils.torch_utils import randn_tensor
 
 model = "Glasshes/debate_v3.1_model"
 tokenizer = AutoTokenizer.from_pretrained(model)
@@ -12,17 +10,27 @@ pipeline = transformers.pipeline(
     device_map="auto",
 )
 
-def debate_response(text):
+def generate_text(text):
     sequences = pipeline(
-        "hello \n",
+        text,
         do_sample=True,
         top_k=10,
         num_return_sequences=1,
         eos_token_id=tokenizer.eos_token_id,
-        max_length=100,
+        max_length=500,
     )
-    return "testing"
-
+    response = ""
+    for seq in sequences:
+        generated_text = seq['generated_text']
+        print(f"Result: {generated_text}")
+        response += generated_text
+    return response
 
-intf = gr.Interface(fn=debate_response, inputs=gr.Textbox(), outputs="text")
-intf.launch()
+iface = gr.Interface(
+    fn=generate_text,
+    inputs=gr.Textbox(),
+    outputs=gr.Textbox(),
+    live=True,
+)
+
+iface.launch()
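
For reference, the sketch below shows how the full app.py reads after this commit. The diff hunk only shows the tail of the transformers.pipeline(...) call, so the constructor arguments here (a "text-generation" task reusing the same model and tokenizer) are an assumption, not part of the commit; everything else is taken directly from the diff.

import gradio as gr
import transformers
from transformers import AutoTokenizer

model = "Glasshes/debate_v3.1_model"
tokenizer = AutoTokenizer.from_pretrained(model)

# Assumption: the constructor is not visible in the diff hunk; a
# text-generation pipeline built from the same model/tokenizer is assumed.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device_map="auto",
)

def generate_text(text):
    # A text-generation pipeline returns a list of dicts,
    # each carrying a 'generated_text' entry.
    sequences = pipeline(
        text,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_length=500,
    )
    response = ""
    for seq in sequences:
        generated_text = seq['generated_text']
        print(f"Result: {generated_text}")
        response += generated_text
    return response

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    live=True,  # re-runs generate_text whenever the input box changes
)

iface.launch()

Note that live=True makes Gradio call generate_text on every change to the textbox rather than on a submit click, so each keystroke can trigger a fresh (and potentially slow) generation; dropping the flag restores the default submit-button behaviour.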