import os
import torch
import random
import numpy as np
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# =============================
# CONFIGURATION
# =============================
MODEL_NAME = "google/mt5-small"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# Load model and tokenizer (load weights directly in the target dtype)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, torch_dtype=DTYPE).to(DEVICE)

# Seed for reproducibility
SEED = 42
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)

# Track user performance (adaptive learning)
user_performance = {
    "memory": [],
    "focus": [],
    "problem_solving": [],
    "emotional_regulation": [],
    "planning": [],
    "inhibition": [],
    "flexible_thinking": [],
}

# =============================
# ADAPTIVE LEARNING FUNCTIONS
# =============================
# Note: the task generators below call this with correct=True as a placeholder,
# since the demo interface does not grade user answers.
def calculate_score(task_type, correct):
    user_performance[task_type].append(1 if correct else 0)

    # Only consider the last 3 attempts for adaptive difficulty
    # (slicing with [-3:] simply returns the full history while it is shorter than 3)
    success_rate = np.mean(user_performance[task_type][-3:])

    if success_rate > 0.7:
        difficulty = "high"
    elif success_rate < 0.3:
        difficulty = "low"
    else:
        difficulty = "medium"

    return success_rate, difficulty

# =============================
# TASK FUNCTIONS
# =============================

# Memory Training
def memory_task(prompt):
    # prompt is accepted for interface compatibility but is not used here
    difficulty = calculate_score("memory", True)[1]
    length = {"low": 5, "medium": 10, "high": 15}[difficulty]

    # Longer sequences at higher difficulty
    sequence = ''.join(random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', k=length))
    response = f"Memorize this sequence: {sequence}"

    return response

# Focus and Attention
def focus_task():
    difficulty = calculate_score("focus", True)[1]
    word = "SUPERCALIFRAGILISTICEXPIALIDOCIOUS"
    # Expected answer; not currently checked by the interface
    vowels = sum(1 for char in word if char in 'AEIOU')

    if difficulty == "high":
        # At high difficulty the stored count is offset, so it no longer
        # matches the true vowel count
        vowels += random.randint(1, 3)

    response = f"How many vowels are in this word: {word}"

    return response

# Problem Solving
def problem_solving_task(question):
    difficulty = calculate_score("problem_solving", True)[1]
    problem_prompt = f"Provide a detailed solution: {question} (difficulty: {difficulty})"
    
    inputs = tokenizer(problem_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=40)
    
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    
    return response

# Emotional Regulation
def emotional_regulation_task(emotion):
    emotion_map = {
        "happy": "Reflect on why you're happy. Savor the feeling.",
        "sad": "Try to reframe negative thoughts. What's one positive aspect of this situation?",
        "angry": "Take 3 deep breaths. Focus on why you feel this way without judgment.",
        "fearful": "Ground yourself by describing your surroundings aloud.",
        "surprised": "Identify the cause of surprise. How can you integrate this new information?",
        "disgusted": "Challenge your initial reaction. Is there another perspective?",
        "neutral": "Mindfully observe your thoughts without judgment."
    }
    response = emotion_map.get(emotion.lower(), "Focus on your breath and stay present.")
    
    return response

# Planning and Organization
def planning_task(goal):
    difficulty = calculate_score("planning", True)[1]
    planning_prompt = f"Create a structured plan to achieve: {goal} (difficulty: {difficulty})"
    
    inputs = tokenizer(planning_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=80)
    
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    
    return response

# Inhibition Control
def inhibition_task():
    stimuli = ["RED", "BLUE", "GREEN", "YELLOW"]
    correct_response = random.choice(stimuli)
    
    response = f"When you see '{correct_response}', say a different color."

    return response

# Flexible Thinking
def flexible_thinking_task():
    difficulty = calculate_score("flexible_thinking", True)[1]
    task = "Name 5 creative uses for a paperclip."
    if difficulty == "high":
        task = "Name 10 creative uses for a paperclip."
    
    return task

# =============================
# GRADIO INTERFACE
# =============================
def executive_function_interface(task_type, input_text, emotion):
    if task_type == "Memory Training":
        return memory_task(input_text)

    elif task_type == "Focus and Attention":
        return focus_task()

    elif task_type == "Problem-Solving":
        return problem_solving_task(input_text)

    elif task_type == "Emotional Regulation":
        return emotional_regulation_task(emotion)

    elif task_type == "Planning and Organization":
        return planning_task(input_text)

    elif task_type == "Inhibition Control":
        return inhibition_task()

    elif task_type == "Flexible Thinking":
        return flexible_thinking_task()

    # Fallback so the output box is never empty when no task is selected
    return "Please select a task type."

# Dropdown for task type
task_dropdown = gr.Dropdown(
    [
        "Memory Training", "Focus and Attention", "Problem-Solving",
        "Emotional Regulation", "Planning and Organization",
        "Inhibition Control", "Flexible Thinking"
    ],
    label="Select Executive Function Task"
)

# Textbox for input prompts
input_text = gr.Textbox(label="Input/Prompt (if applicable)", placeholder="Enter task-related prompt...")

# Dropdown for emotion selection
emotion_dropdown = gr.Dropdown(
    ["Happy", "Sad", "Angry", "Fearful", "Surprised", "Disgusted", "Neutral"],
    label="Select Emotion (For Emotional Regulation)"
)

# Gradio Interface
interface = gr.Interface(
    fn=executive_function_interface,
    inputs=[task_dropdown, input_text, emotion_dropdown],
    outputs="text",
    title="Executive Function Enhancer",
    description="Enhance executive functions with AI-based adaptive learning and cognitive tasks.",
    theme="default"
)

# =============================
# LAUNCH
# =============================
if __name__ == "__main__":
    interface.launch()
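
# =============================
# OPTIONAL SMOKE TEST (sketch)
# =============================
# A minimal, commented-out example of exercising the dispatcher directly,
# without the Gradio UI. The example inputs are illustrative only; uncomment
# and run in place of interface.launch() once the model has loaded.
#
# print(executive_function_interface("Memory Training", "", "Neutral"))
# print(executive_function_interface("Emotional Regulation", "", "Angry"))
# print(executive_function_interface("Flexible Thinking", "", "Neutral"))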