DmitryYarov committed
Commit 97f5e89 · verified · 1 Parent(s): d9ff07a

Update app.py

Files changed (1)
  1. app.py +29 -2
app.py CHANGED
@@ -1,8 +1,35 @@
+# import gradio as gr
+# from transformers import pipeline
+# model = pipeline("text-generation", model="DmitryYarov/aristotle_based_on_rugpt3large_based_on_gpt")
+# def predict(prompt):
+#     completion = model(prompt)[0]["generated_text"]
+#     return completion
+
+# gr.Interface(fn=predict, inputs="text", outputs="text").launch(share=True)
+
 import gradio as gr
 from transformers import pipeline
+
+# Load the text generation model
 model = pipeline("text-generation", model="DmitryYarov/aristotle_based_on_rugpt3large_based_on_gpt")
+
 def predict(prompt):
-    completion = model(prompt)[0]["generated_text"]
-    return completion
+    # Generate text with parameters tuned for longer, more philosophical answers
+    completions = model(
+        prompt,
+        repetition_penalty=2.0,    # Penalty for repetitions
+        no_repeat_ngram_size=2,    # Prevent repetition of bigrams
+        max_length=500,            # Increased maximum length for more extensive responses
+        num_return_sequences=3,    # Generate multiple sequences for variety
+        num_beams=5,               # Beam search for better quality
+        do_sample=True,            # Enable sampling during generation
+        temperature=0.7,           # Sampling temperature (randomness)
+        top_k=50,                  # Limit sampling to the top-k tokens
+        top_p=0.95,                # Limit sampling to the top-p probability mass (nucleus sampling)
+        truncation=True            # Truncate the input if it exceeds the maximum length
+    )
+
+    return completions
 
+# Create the Gradio interface (text in, text out)
 gr.Interface(fn=predict, inputs="text", outputs="text").launch(share=True)
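
Note on the new return value: with num_return_sequences=3 the text-generation pipeline returns a list of three dicts, so the Gradio "text" output will display the raw Python repr of that list. Below is a minimal sketch, not part of this commit, of one way to unpack the candidates into readable text; the join logic and the "---" separator are editorial choices, everything else is taken from the diff above.

import gradio as gr
from transformers import pipeline

model = pipeline(
    "text-generation",
    model="DmitryYarov/aristotle_based_on_rugpt3large_based_on_gpt",
)

def predict(prompt):
    # Same generation settings as in the commit above
    completions = model(
        prompt,
        repetition_penalty=2.0,
        no_repeat_ngram_size=2,
        max_length=500,
        num_return_sequences=3,
        num_beams=5,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        truncation=True,
    )
    # Each candidate is a dict with a "generated_text" key; join them so the
    # plain-text output stays readable.
    return "\n\n---\n\n".join(c["generated_text"] for c in completions)

gr.Interface(fn=predict, inputs="text", outputs="text").launch(share=True)

A related detail: max_length=500 counts the prompt tokens as well as the generated ones; max_new_tokens is the parameter to use if only the length of the continuation should be bounded.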