"""Executive function helper using cognitive analysis."""
import torch
import random
import numpy as np
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# =============================
# CONFIGURATION
# =============================
MODEL_NAME = "google/mt5-small"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to(DEVICE).to(DTYPE)
model.eval()  # inference only; disables dropout
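# Note: google/mt5-small is pretrained on span corruption only (no instruction
# tuning), so the text-generation tasks below may produce weak output unless the
# checkpoint is fine-tuned or swapped for an instruction-tuned model.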
# Seed for reproducibility
SEED = 42
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)
# Track user performance (adaptive learning)
user_performance = {
    "memory": [],
    "focus": [],
    "problem_solving": [],
    "emotional_regulation": [],
    "planning": [],
    "inhibition": [],
    "flexible_thinking": [],
}
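# Each list above stores one entry per attempt (1 = success, 0 = failure), most recent last.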
# =============================
# ADAPTIVE LEARNING FUNCTIONS
# =============================
def calculate_score(task_type, correct):
    user_performance[task_type].append(1 if correct else 0)
    # Only consider the last 3 attempts for adaptive difficulty
    success_rate = np.mean(user_performance[task_type][-3:])
    if success_rate > 0.7:
        difficulty = "high"
    elif success_rate < 0.3:
        difficulty = "low"
    else:
        difficulty = "medium"
    return success_rate, difficulty
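# Example (illustrative): after three recorded successes the rolling mean is 1.0,
# so difficulty becomes "high"; after outcomes [1, 1, 0] the mean is ~0.67 -> "medium".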
# =============================
# TASK FUNCTIONS
# =============================
# Memory Training
def memory_task(prompt):
    # `prompt` is accepted for interface compatibility but not used here.
    difficulty = calculate_score("memory", True)[1]
    length = {"low": 5, "medium": 10, "high": 15}[difficulty]
    sequence = ''.join(random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', k=length))
    return f"Memorize this sequence: {sequence}"
# Focus and Attention
def focus_task():
    difficulty = calculate_score("focus", True)[1]
    word = "SUPERCALIFRAGILISTICEXPIALIDOCIOUS"
    # Expected answer (currently computed but not shown or checked against the user's reply).
    vowels = sum(1 for char in word if char in 'AEIOU')
    if difficulty == "high":
        vowels += random.randint(1, 3)
    return f"How many vowels are in this word: {word}"
# Problem Solving
def problem_solving_task(question):
    difficulty = calculate_score("problem_solving", True)[1]
    problem_prompt = f"Provide a detailed solution: {question} (difficulty: {difficulty})"
    inputs = tokenizer(problem_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=40)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Emotional Regulation
def emotional_regulation_task(emotion):
    emotion_map = {
        "happy": "Reflect on why you're happy. Savor the feeling.",
        "sad": "Try to reframe negative thoughts. What's one positive aspect of this situation?",
        "angry": "Take 3 deep breaths. Focus on why you feel this way without judgment.",
        "fearful": "Ground yourself by describing your surroundings aloud.",
        "surprised": "Identify the cause of surprise. How can you integrate this new information?",
        "disgusted": "Challenge your initial reaction. Is there another perspective?",
        "neutral": "Mindfully observe your thoughts without judgment.",
    }
    return emotion_map.get(emotion.lower(), "Focus on your breath and stay present.")
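# Example: emotional_regulation_task("Angry") returns the deep-breathing prompt above;
# unrecognized emotions fall back to the generic breathing cue.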
# Planning and Organization
def planning_task(goal):
    difficulty = calculate_score("planning", True)[1]
    planning_prompt = f"Create a structured plan to achieve: {goal} (difficulty: {difficulty})"
    inputs = tokenizer(planning_prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        output = model.generate(**inputs, max_length=80)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Inhibition Control
def inhibition_task():
    stimuli = ["RED", "BLUE", "GREEN", "YELLOW"]
    correct_response = random.choice(stimuli)
    return f"When you see '{correct_response}', say a different color."
# Flexible Thinking
def flexible_thinking_task():
    difficulty = calculate_score("flexible_thinking", True)[1]
    task = "Name 5 creative uses for a paperclip."
    if difficulty == "high":
        task = "Name 10 creative uses for a paperclip."
    return task
# =============================
# GRADIO INTERFACE
# =============================
def executive_function_interface(task_type, input_text, emotion):
    if task_type == "Memory Training":
        return memory_task(input_text)
    elif task_type == "Focus and Attention":
        return focus_task()
    elif task_type == "Problem-Solving":
        return problem_solving_task(input_text)
    elif task_type == "Emotional Regulation":
        return emotional_regulation_task(emotion)
    elif task_type == "Planning and Organization":
        return planning_task(input_text)
    elif task_type == "Inhibition Control":
        return inhibition_task()
    elif task_type == "Flexible Thinking":
        return flexible_thinking_task()
    # Fallback so the interface never returns None
    return "Please select a task type."
# Dropdown for task type
task_dropdown = gr.Dropdown(
    [
        "Memory Training", "Focus and Attention", "Problem-Solving",
        "Emotional Regulation", "Planning and Organization",
        "Inhibition Control", "Flexible Thinking",
    ],
    label="Select Executive Function Task",
)
# Textbox for input prompts
input_text = gr.Textbox(label="Input/Prompt (if applicable)", placeholder="Enter task-related prompt...")
# Dropdown for emotion selection
emotion_dropdown = gr.Dropdown(
    ["Happy", "Sad", "Angry", "Fearful", "Surprised", "Disgusted", "Neutral"],
    label="Select Emotion (For Emotional Regulation)",
)
# Gradio Interface
interface = gr.Interface(
    fn=executive_function_interface,
    inputs=[task_dropdown, input_text, emotion_dropdown],
    outputs="text",
    title="Executive Function Enhancer",
    description="Enhance executive functions with AI-based adaptive learning and cognitive tasks.",
    theme="default",
)
# =============================
# LAUNCH
# =============================
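# Optional: launch(share=True) creates a temporary public Gradio link when running locally.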
if __name__ == "__main__":
    interface.launch()