berkerbatur committed
Commit 0597783 · verified
1 Parent(s): b47731b

Create app.py

Files changed (1)
  1. app.py +40 -0
app.py ADDED
@@ -0,0 +1,40 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained("sergeantson/GPT2_Medium_Law")
+ model = AutoModelForCausalLM.from_pretrained("sergeantson/GPT2_Medium_Law")
+
+ def generate_text(input_text, max_length, num_return_sequences, temperature, top_k, top_p):
+     inputs = tokenizer(input_text, return_tensors="pt")
+     output = model.generate(
+         **inputs,
+         max_length=int(max_length),          # sliders may return floats; generate expects ints
+         num_return_sequences=int(num_return_sequences),
+         do_sample=True,                      # required for temperature/top-k/top-p and >1 sequence
+         temperature=temperature,
+         top_k=int(top_k),
+         top_p=top_p,
+         no_repeat_ngram_size=2               # prevents repeating 2-grams
+     )
+     generated_texts = [tokenizer.decode(output[i], skip_special_tokens=True) for i in range(int(num_return_sequences))]
+     return "\n\n".join(generated_texts)
+
+ # Set up the Gradio interface
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.Textbox(lines=2, placeholder="Enter a prompt here...", label="Input Text"),
+         gr.Slider(minimum=10, maximum=200, value=50, step=1, label="Max Length"),
+         gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of Return Sequences"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top-k"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p")
+     ],
+     outputs="text",
+     title="Legal Text Generator",
+     description="Enter a prompt to generate legal text based on the input."
+ )
+
+ # Launch the interface
+ iface.launch(share=False)
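
A quick way to sanity-check the generation settings without starting the UI is to call generate_text directly. A minimal sketch, assuming the snippet is placed before iface.launch (or app.py is imported); the prompt string is purely illustrative:

# Illustrative smoke test: exercise generate_text with the same defaults the
# sliders use (max_length=50, 1 sequence, temperature=0.7, top_k=50, top_p=0.9).
sample = generate_text(
    "The tenant shall be liable for",  # hypothetical example prompt
    max_length=50,
    num_return_sequences=1,
    temperature=0.7,
    top_k=50,
    top_p=0.9,
)
print(sample)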