miguelcastroe committed on
Commit
4f49924
·
verified ·
1 Parent(s): 1b95ca2

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -0
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import subprocess
import sys  # sys.executable is the documented way to locate the running interpreter

# Bootstrap: ensure SentencePiece and Accelerate are available before the
# transformers imports below may need them; install on demand if missing.
try:
    import sentencepiece
except ImportError:
    # Use sys.executable (public API) instead of os.sys.executable, which
    # only works by accident of os importing sys as an internal detail.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "sentencepiece"])

try:
    import accelerate
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
14
+
15
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Select the compute device: use the GPU when CUDA is available,
# otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load a smaller, faster model and tokenizer.
# "distilgpt2" is a distilled GPT-2 variant chosen here for speed; the
# weights are fetched from the Hugging Face hub on first run, then cached.
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Move the model onto the selected device so generation runs there.
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
26
+
27
+ # Function to evaluate the prompt using the loaded model
28
+ def evaluar_prompt(prompt):
29
+ try:
30
+ # Generate analysis using the model
31
+ inputs = tokenizer(prompt, return_tensors="pt").to(device)
32
+ outputs = model.generate(inputs["input_ids"], max_length=150) # Limit max_length for faster results
33
+ analysis = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
+
35
+ # Basic logic to guide the analysis based on critical thinking principles
36
+ feedback = "Análisis del Prompt:\n"
37
+
38
+ # Check clarity
39
+ if len(prompt.split()) < 5:
40
+ feedback += "- Claridad: El prompt es muy breve y puede