import gradio as gr
import mne
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Load an open-source instruction-tuned LLM, used as-is with no additional training
model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map="auto",  # Automatically places the model on GPU if available, else CPU
)

def compute_band_power(psd, freqs, fmin, fmax):
    """Compute the mean band power in the given frequency range."""
    freq_mask = (freqs >= fmin) & (freqs <= fmax)
    # Average across channels and the selected frequencies
    band_psd = psd[:, freq_mask].mean()
    return float(band_psd)

def process_eeg(file):
    # Load raw EEG data with MNE (expects a .fif file containing raw EEG)
    raw = mne.io.read_raw_fif(file.name, preload=True)

    # Compute the power spectral density (PSD) between 1 and 40 Hz.
    # Note: psd_welch was removed in newer MNE releases; there,
    # raw.compute_psd(method="welch", fmin=1, fmax=40).get_data(return_freqs=True)
    # is the replacement.
    psd, freqs = mne.time_frequency.psd_welch(raw, fmin=1, fmax=40)

    # Compute simple band powers for the alpha and beta bands
    alpha_power = compute_band_power(psd, freqs, 8, 12)
    beta_power = compute_band_power(psd, freqs, 13, 30)

    # Create a short summary of the extracted features.
    # The qualitative sentence is a fixed placeholder, not derived from the data.
    data_summary = (
        f"Alpha power: {alpha_power:.3f}, Beta power: {beta_power:.3f}. "
        f"The EEG shows stable alpha rhythms and slightly elevated beta activity."
    )

    # Prepare the prompt for the language model
    prompt = f"""You are a neuroscientist analyzing EEG features.
Data Summary: {data_summary}
Provide a concise, user-friendly interpretation of these findings in simple terms.
"""

    # Generate the interpretation with the LLM
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs,
        max_length=200,  # caps prompt + completion length in tokens
        do_sample=True,
        top_k=50,
        top_p=0.95,
    )
    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return summary

iface = gr.Interface(
    fn=process_eeg,
    inputs=gr.File(label="Upload your EEG data (FIF format)"),
    outputs="text",
    title="NeuroNarrative-Lite: EEG Summary",
    description=(
        "Upload EEG data to receive a text-based summary from an "
        "open-source language model. No training required!"
    ),
)

if __name__ == "__main__":
    iface.launch()
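
# --- Optional local test helper (illustrative sketch, not part of the app) ---
# The commented snippet below shows one way to create a small synthetic EEG
# recording and save it as a FIF file that can be uploaded through the interface
# above. Channel names, sampling rate, duration, and the output filename are
# arbitrary example values, not anything required by the app.
#
#     import numpy as np
#     import mne
#
#     sfreq = 250.0  # sampling rate in Hz (example value)
#     info = mne.create_info(ch_names=["Fz", "Cz", "Pz"], sfreq=sfreq, ch_types="eeg")
#     data = 1e-5 * np.random.randn(3, int(sfreq * 10))  # 10 s of noise on 3 channels
#     raw = mne.io.RawArray(data, info)
#     raw.save("synthetic_test_raw.fif", overwrite=True)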