Update app.py
app.py CHANGED
@@ -13,26 +13,39 @@ except ImportError:
 subprocess.check_call([os.sys.executable, "-m", "pip", "install", "accelerate"])
 
 import gradio as gr
-from transformers import
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
 import torch
 
 # Check if a GPU is available, otherwise use CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Load
-model_name = "
-tokenizer =
-model =
+# Load GPT-2 model and tokenizer
+model_name = "gpt2"  # or "gpt2-medium", "gpt2-large", "gpt2-xl"
+tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+model = GPT2LMHeadModel.from_pretrained(model_name).to(device)
 
-# Function to
+# Function to generate text using GPT-2
 def evaluar_prompt(prompt):
     try:
-        #
+        # Encode the prompt
         inputs = tokenizer(prompt, return_tensors="pt").to(device)
-
-
+
+        # Generate text
+        outputs = model.generate(
+            inputs["input_ids"],
+            max_length=150,  # Limit max_length for faster results
+            num_return_sequences=1,
+            no_repeat_ngram_size=2,
+            do_sample=True,
+            top_k=50,
+            top_p=0.95,
+            temperature=0.7
+        )
+
+        # Decode the generated text
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-        # Initialize variables to store detailed advice
+        # Initialize variables to store detailed advice (using the generated text as context)
         claridad_consejo = ""
         logica_consejo = ""
         relevancia_consejo = ""
@@ -169,4 +182,4 @@ def interfaz():
 
 # Run the interface
 demo = interfaz()
-demo.launch()
+demo.launch()
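For readers outside the diff view, the sketch below reproduces the generation path this commit introduces as one self-contained script. It is a minimal sketch, assuming only that torch and transformers are installed; the example prompt is illustrative and not part of app.py.

# Minimal standalone sketch of the generation path added in this commit.
# Assumes torch and transformers are installed; the prompt is illustrative.
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_name = "gpt2"  # the commit notes gpt2-medium/-large/-xl as alternatives
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name).to(device)

prompt = "Write one short tip for making prompts clearer."  # illustrative
inputs = tokenizer(prompt, return_tensors="pt").to(device)

# Same sampling setup as the diff: do_sample=True turns on stochastic
# decoding, top_k/top_p limit the candidate tokens at each step,
# temperature=0.7 sharpens the distribution, and no_repeat_ngram_size=2
# blocks any bigram from appearing twice in the output.
outputs = model.generate(
    inputs["input_ids"],
    max_length=150,
    num_return_sequences=1,
    no_repeat_ngram_size=2,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Because temperature below 1 already sharpens the sampling distribution, top_k=50 and top_p=0.95 mainly act as a cap on the unlikely tail; raising max_length past 150 trades the faster results the diff comment mentions for longer completions.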