Executive function helper using cognitive analysis
app.py
ADDED
@@ -0,0 +1,208 @@
import os
import torch
import random
import numpy as np
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# =============================
# CONFIGURATION
# =============================
MODEL_NAME = "google/mt5-small"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to(DEVICE).to(DTYPE)

# Seed for reproducibility
SEED = 42
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)

# Track user performance per executive function (adaptive learning)
user_performance = {
    "memory": [],
    "focus": [],
    "problem_solving": [],
    "emotional_regulation": [],
    "planning": [],
    "inhibition": [],
    "flexible_thinking": [],
}

# =============================
# ADAPTIVE LEARNING FUNCTIONS
# =============================
def calculate_score(task_type, correct):
    """Record an attempt and derive a difficulty level from the recent success rate."""
    user_performance[task_type].append(1 if correct else 0)

    # Only consider the last 3 attempts for adaptive difficulty
    history = user_performance[task_type]
    success_rate = np.mean(history[-3:]) if len(history) >= 3 else np.mean(history)

    if success_rate > 0.7:
        difficulty = "high"
    elif success_rate < 0.3:
        difficulty = "low"
    else:
        difficulty = "medium"

    return success_rate, difficulty

# =============================
# TASK FUNCTIONS
# =============================

# Memory Training
def memory_task(prompt):
    # Generating a task is logged as a successful attempt for the adaptive tracker.
    difficulty = calculate_score("memory", True)[1]
    length = {"low": 5, "medium": 10, "high": 15}[difficulty]

    # The prompt argument is currently unused; the sequence is generated randomly.
    sequence = ''.join(random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', k=length))
    return f"Memorize this sequence: {sequence}"

# Focus and Attention
def focus_task():
    difficulty = calculate_score("focus", True)[1]
    word = "SUPERCALIFRAGILISTICEXPIALIDOCIOUS"

    # The vowel count is computed but not used in the response; answers are not checked yet.
    vowels = sum(1 for char in word if char in 'AEIOU')
    if difficulty == "high":
        vowels += random.randint(1, 3)

    return f"How many vowels are in this word: {word}"

# Problem Solving
def problem_solving_task(question):
    difficulty = calculate_score("problem_solving", True)[1]
    problem_prompt = f"Provide a detailed solution: {question} (difficulty: {difficulty})"

    inputs = tokenizer(problem_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=40)

    return tokenizer.decode(output[0], skip_special_tokens=True)

# Emotional Regulation
def emotional_regulation_task(emotion):
    emotion_map = {
        "happy": "Reflect on why you're happy. Savor the feeling.",
        "sad": "Try to reframe negative thoughts. What's one positive aspect of this situation?",
        "angry": "Take 3 deep breaths. Focus on why you feel this way without judgment.",
        "fearful": "Ground yourself by describing your surroundings aloud.",
        "surprised": "Identify the cause of surprise. How can you integrate this new information?",
        "disgusted": "Challenge your initial reaction. Is there another perspective?",
        "neutral": "Mindfully observe your thoughts without judgment."
    }
    # Fall back to a generic grounding prompt if no emotion is selected.
    return emotion_map.get((emotion or "").lower(), "Focus on your breath and stay present.")

# Planning and Organization
def planning_task(goal):
    difficulty = calculate_score("planning", True)[1]
    planning_prompt = f"Create a structured plan to achieve: {goal} (difficulty: {difficulty})"

    inputs = tokenizer(planning_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=80)

    return tokenizer.decode(output[0], skip_special_tokens=True)

# Inhibition Control
def inhibition_task():
    stimuli = ["RED", "BLUE", "GREEN", "YELLOW"]
    correct_response = random.choice(stimuli)

    return f"When you see '{correct_response}', say a different color."

# Flexible Thinking
def flexible_thinking_task():
    difficulty = calculate_score("flexible_thinking", True)[1]
    task = "Name 5 creative uses for a paperclip."
    if difficulty == "high":
        task = "Name 10 creative uses for a paperclip."

    return task

# =============================
# GRADIO INTERFACE
# =============================
def executive_function_interface(task_type, input_text, emotion):
    if task_type == "Memory Training":
        return memory_task(input_text)

    elif task_type == "Focus and Attention":
        return focus_task()

    elif task_type == "Problem-Solving":
        return problem_solving_task(input_text)

    elif task_type == "Emotional Regulation":
        return emotional_regulation_task(emotion)

    elif task_type == "Planning and Organization":
        return planning_task(input_text)

    elif task_type == "Inhibition Control":
        return inhibition_task()

    elif task_type == "Flexible Thinking":
        return flexible_thinking_task()

    # Guard against an empty selection so the interface never returns None.
    return "Please select a task."

# Dropdown for task type
task_dropdown = gr.Dropdown(
    [
        "Memory Training", "Focus and Attention", "Problem-Solving",
        "Emotional Regulation", "Planning and Organization",
        "Inhibition Control", "Flexible Thinking"
    ],
    label="Select Executive Function Task"
)

# Textbox for input prompts
input_text = gr.Textbox(label="Input/Prompt (if applicable)", placeholder="Enter task-related prompt...")

# Dropdown for emotion selection
emotion_dropdown = gr.Dropdown(
    ["Happy", "Sad", "Angry", "Fearful", "Surprised", "Disgusted", "Neutral"],
    label="Select Emotion (For Emotional Regulation)"
)

# Gradio Interface
interface = gr.Interface(
    fn=executive_function_interface,
    inputs=[task_dropdown, input_text, emotion_dropdown],
    outputs="text",
    title="Executive Function Enhancer",
    description="Enhance executive functions with AI-based adaptive learning and cognitive tasks.",
    theme="default"
)

# =============================
# LAUNCH
# =============================
if __name__ == "__main__":
    interface.launch()
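Running app.py locally assumes torch, transformers, sentencepiece (typically required by the mT5 tokenizer), numpy, and gradio are installed; starting it with python app.py then serves the Gradio interface on its default local port via interface.launch().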