import subprocess
import sys

# Ensure SentencePiece and Accelerate are installed
try:
    import sentencepiece
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "sentencepiece"])
try:
    import accelerate
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
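# Note: on Hugging Face Spaces the usual approach is to pin these packages in
# a requirements.txt (one package name per line) so they are installed at
# build time; the try/except blocks above are a runtime fallback.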
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Check if a GPU is available, otherwise use CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load a smaller, faster model and tokenizer
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
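# Note: distilgpt2 uses a byte-level BPE tokenizer, not SentencePiece, so the
# installs above only matter if this model is swapped for one whose tokenizer
# is SentencePiece-based.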
# Function to evaluate the prompt using the loaded model
def evaluar_prompt(prompt):
    try:
        # Generate analysis using the model
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        # Pass the full encoding so the attention mask is used, and set
        # pad_token_id explicitly: GPT-2-family models define no pad token.
        outputs = model.generate(
            **inputs,
            max_length=150,  # Limit max_length for faster results
            pad_token_id=tokenizer.eos_token_id,
        )
        analysis = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Basic logic to guide the analysis based on critical thinking principles
        feedback = "Análisis del Prompt:\n"
        # Check clarity
        if len(prompt.split()) < 5:
            feedback += "- Claridad: El prompt es muy breve y puede resultar ambiguo.\n"
        # The source file is truncated at the line above; the rest of the
        # function is a minimal, assumed completion so it returns a result.
        feedback += "\nTexto generado por el modelo:\n" + analysis
        return feedback
    except Exception as e:
        return f"Error al evaluar el prompt: {e}"