miguelcastroe committed
Commit 2244c96 · verified · 1 Parent(s): 100ce82

Update app.py

Files changed (1):
  1. app.py (+24 -11)
app.py CHANGED
@@ -13,26 +13,39 @@ except ImportError:
    subprocess.check_call([os.sys.executable, "-m", "pip", "install", "accelerate"])

import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch

# Check if a GPU is available, otherwise use CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # Load a smaller, faster model and tokenizer
- model_name = "distilgpt2"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
+ # Load GPT-2 model and tokenizer
+ model_name = "gpt2"  # or "gpt2-medium", "gpt2-large", "gpt2-xl"
+ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+ model = GPT2LMHeadModel.from_pretrained(model_name).to(device)

- # Function to evaluate the prompt using the loaded model
+ # Function to generate text using GPT-2
def evaluar_prompt(prompt):
    try:
-         # Generate analysis using the model
+         # Encode the prompt
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
-         outputs = model.generate(inputs["input_ids"], max_length=150)  # Limit max_length for faster results
-         analysis = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+         # Generate text
+         outputs = model.generate(
+             inputs["input_ids"],
+             max_length=150,  # Limit max_length for faster results
+             num_return_sequences=1,
+             no_repeat_ngram_size=2,
+             do_sample=True,
+             top_k=50,
+             top_p=0.95,
+             temperature=0.7
+         )
+
+         # Decode the generated text
+         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

-         # Initialize variables to store detailed advice
+         # Initialize variables to store detailed advice (using the generated text as context)
        claridad_consejo = ""
        logica_consejo = ""
        relevancia_consejo = ""

@@ -169,4 +182,4 @@ def interfaz():

# Run the interface
demo = interfaz()
- demo.launch()
+ demo.launch()
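
The first hunk shows only the tail of an install guard; the try/except that opens it sits above the diff context, as hinted by the hunk header's "except ImportError:". A minimal sketch of the pattern, assuming the guarded import is accelerate (inferred from the pip target, not visible in the diff):

# Sketch of the install-on-failure guard implied by the hunk context;
# the guarded module name is an assumption.
import os
import subprocess

try:
    import accelerate
except ImportError:
    # os.sys is an alias for the sys module, so os.sys.executable is the
    # running Python interpreter; `import sys` would be the idiomatic spelling.
    subprocess.check_call([os.sys.executable, "-m", "pip", "install", "accelerate"])
    import accelerate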
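
To see what the new decoding settings do in isolation, here is a minimal standalone sketch that mirrors the committed generate() call outside the Gradio app; the prompt string is illustrative, not from the commit:

# Standalone sketch of the commit's new sampling configuration.
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2").to(device)

inputs = tokenizer("A clear, specific prompt should", return_tensors="pt").to(device)
outputs = model.generate(
    inputs["input_ids"],
    max_length=150,          # hard cap on prompt + generated tokens
    num_return_sequences=1,  # a single completion
    no_repeat_ngram_size=2,  # never repeat the same bigram
    do_sample=True,          # sample instead of greedy decoding
    top_k=50,                # restrict to the 50 most likely tokens
    top_p=0.95,              # nucleus sampling over 95% of probability mass
    temperature=0.7          # sharpen the distribution below 1.0
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

The previous code called model.generate(inputs["input_ids"], max_length=150) with no sampling flags, which falls back to greedy decoding; the added do_sample/top_k/top_p/temperature settings trade that determinism for more varied completions, and no_repeat_ngram_size=2 suppresses the repetition loops greedy GPT-2 output is prone to.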