anirudh-sub committed on
Commit ed619fb · 1 Parent(s): e8fc346

Changed app.py

Files changed (1)
  1. app.py +25 -13
app.py CHANGED
@@ -1,23 +1,35 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+import transformers
+from transformers import AutoTokenizer
+model = "anirudh-sub/debate_model_v2.1"
+tokenizer = AutoTokenizer.from_pretrained(model)
 
-model_name = "anirudh-sub/debate_model_v2"
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    device_map="auto",
+)
 
-def generate_text(prompt):
-    input_ids = tokenizer.encode(prompt, return_tensors="pt")
-    output = model.generate(input_ids, max_length=100, num_return_sequences=1)
-    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
-    return generated_text
+def generate_text(text):
+    # return "hello" + text  # debug stub, left disabled so the pipeline below runs
+    sequences = pipeline(
+        text,
+        do_sample=True,
+        top_k=10,
+        num_return_sequences=1,
+        eos_token_id=tokenizer.eos_token_id,
+        max_length=500,
+    )
+    response = ""
+    for seq in sequences:
+        print(f"Result: {seq['generated_text']}")
+        response += seq['generated_text']
+    return response
 
 iface = gr.Interface(
     fn=generate_text,
-    inputs="text",
+    inputs=gr.Textbox(),
     outputs="text",
-    live=True,
-    title="Debate Model",
-    description="This model generates text based on the input prompt using Llama.",
 )
 
 iface.launch()
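
For reviewers who want to sanity-check the new pipeline path outside Gradio, here is a minimal smoke-test sketch. It assumes the anirudh-sub/debate_model_v2.1 weights are reachable on the Hub and that transformers and accelerate are installed; the prompt string is invented for illustration and is not part of the commit.

# Smoke test for the pipeline-based generator (sketch, not part of app.py).
import transformers
from transformers import AutoTokenizer

model = "anirudh-sub/debate_model_v2.1"  # model id from the diff above
tokenizer = AutoTokenizer.from_pretrained(model)

# Same settings as the committed pipeline; device_map="auto" needs `accelerate`.
pipe = transformers.pipeline(
    "text-generation",
    model=model,
    device_map="auto",
)

# Hypothetical debate prompt, for illustration only.
out = pipe(
    "Resolved: standardized testing should be abolished. Opening argument:",
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=500,
)
print(out[0]["generated_text"])

If this prints a completion, the Gradio app should behave the same way, since generate_text wraps the identical pipeline call; running python app.py and opening the local URL it prints is the end-to-end check.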