# EchoTasks / app.py
import gradio as gr
import os
from datetime import datetime
import requests
import trello
from dotenv import load_dotenv
import urllib3
import wave
import audioop
import speech_recognition as sr
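# Third-party deps: gradio, requests, py-trello (imported as `trello`),
# python-dotenv (load_dotenv), SpeechRecognition (imported as
# `speech_recognition`), and urllib3; wave and audioop are stdlib.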
# Disable SSL warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Load environment variables from .env file
load_dotenv()
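# Keys this app reads from the environment (via .env):
#   TRELLO_API_KEY, TRELLO_TOKEN  - Trello REST credentials
#   HUGGINGFACE_API_KEY           - Hugging Face Inference API token
#   SAMBANOVA_API_KEY             - SambaNova chat-completions key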
SAMBANOVA_API_KEY = "34115dcb-baab-4390-ab5c-e501666f9f4e"
SAMBANOVA_URL = "https://api.sambanova.ai/v1/chat/completions"
# Initialize Trello client
trello_client = trello.TrelloClient(
    api_key=os.getenv('TRELLO_API_KEY'),
    token=os.getenv('TRELLO_TOKEN')
)
def get_trello_members():
    """Get all members from the Trello workspace"""
    try:
        boards = trello_client.list_boards()
        if not boards:
            raise Exception("No Trello boards found")
        board = boards[0]
        members = board.get_members()
        return {(member.full_name or member.username): member.id for member in members}
    except Exception as e:
        print(f"Error fetching Trello members: {str(e)}")
        return {}
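# Example return shape (illustrative values, not real IDs):
#   {"Jane Doe": "5e9f1c0a...", "sam_dev": "60aa2b4f..."}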
def process_audio_data(audio_path):
    """Normalize a WAV file to 16-bit mono PCM at 16 kHz"""
    try:
        with wave.open(audio_path, 'rb') as wav_file:
            # Get audio parameters
            n_channels = wav_file.getnchannels()
            sampwidth = wav_file.getsampwidth()
            framerate = wav_file.getframerate()
            n_frames = wav_file.getnframes()
            # Read audio data
            audio_data = wav_file.readframes(n_frames)
            # Convert to mono if stereo
            if n_channels == 2:
                audio_data = audioop.tomono(audio_data, sampwidth, 1, 1)
            # Convert to 16-bit if needed
            if sampwidth != 2:
                audio_data = audioop.lin2lin(audio_data, sampwidth, 2)
            # Resample to 16kHz if needed
            if framerate != 16000:
                audio_data, _ = audioop.ratecv(audio_data, 2, 1, framerate, 16000, None)
                framerate = 16000
            return audio_data, framerate
    except Exception as e:
        print(f"Error processing audio: {str(e)}")
        raise
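# NOTE: audioop is deprecated since Python 3.11 and removed in 3.13, so this
# helper requires Python <= 3.12. It is also not called by the current
# pipeline; transcribe_audio() below reads the file via sr.AudioFile directly.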
def transcribe_audio(audio_file):
    """Convert audio to text using Speech Recognition"""
    try:
        # Initialize recognizer
        recognizer = sr.Recognizer()
        # Handle different audio input formats
        if isinstance(audio_file, tuple):
            audio_path = audio_file[0]
        else:
            audio_path = audio_file
        print(f"Processing audio file: {audio_path}")
        try:
            # Load the audio file
            with sr.AudioFile(audio_path) as source:
                # Adjust for ambient noise
                recognizer.adjust_for_ambient_noise(source)
                # Read the audio data
                audio_data = recognizer.record(source)
                # Perform the transcription
                text = recognizer.recognize_google(
                    audio_data,
                    language='en-US',
                    show_all=False,
                    with_confidence=False
                )
                if not text:
                    raise Exception("No transcription results returned")
                return text.strip()
        except sr.UnknownValueError:
            raise Exception("Speech could not be understood. Please try speaking more clearly.")
        except sr.RequestError as e:
            raise Exception(f"Could not request results from Google Speech Recognition service; {e}")
    except Exception as e:
        print(f"Transcription error details: {str(e)}")
        raise Exception(f"Transcription error: {str(e)}")
def analyze_emotion(text):
    """Analyze text emotion using the Hugging Face Inference API"""
    API_URL = "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions"
    headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}
    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": text})
        emotions = response.json()
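        # Expected response shape (illustrative scores, not real output):
        #   [[{"label": "anger", "score": 0.72}, {"label": "neutral", "score": 0.11}, ...]]
        # An API error comes back as a dict instead, which the isinstance
        # check below falls through to "normal".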
        # Extract emotion scores
        if isinstance(emotions, list) and len(emotions) > 0:
            emotion_scores = [item for item in emotions[0] if item['label'] != 'neutral']
            # Define emotion categories with their respective thresholds
            urgent_emotions = {
                'anger': 0.15,
                'fear': 0.40,
                'annoyance': 0.10,
                'disapproval': 0.30,
                'nervousness': 0.25,
                'disgust': 0.20,
                'disappointment': 0.40,
                'grief': 0.05,
                'remorse': 0.10,
                'sadness': 0.40
            }
            high_priority_emotions = {
                'desire': 0.25,
                'excitement': 0.35,
                'surprise': 0.15,
                'curiosity': 0.25,
                'optimism': 0.20,
                'pride': 0.10,
                'joy': 0.40,
                'love': 0.25,
                'admiration': 0.25,
                'gratitude': 0.45
            }
            # Calculate weighted urgency scores
            urgent_score = 0
            high_priority_score = 0
            for item in emotion_scores:
                emotion = item['label']
                score = item['score']
                if emotion in urgent_emotions and score > urgent_emotions[emotion]:
                    urgent_score += score
                elif emotion in high_priority_emotions and score > high_priority_emotions[emotion]:
                    high_priority_score += score
            # Determine urgency level based on weighted scores
            if urgent_score > 0.4:
                return "urgent"
            elif high_priority_score > 0.3 or urgent_score > 0.2:
                return "high"
            return "normal"
        return "normal"
    except Exception as e:
        print(f"Error in emotion analysis: {str(e)}")
        return "normal"
def improve_task_description(text):
    """Improve and summarize a task description using the SambaNova API and emotion analysis"""
    try:
        # First analyze emotion to get an initial urgency assessment
        emotion_urgency = analyze_emotion(text)
        prompt = f"""Please analyze and structure this task description, including determining its urgency level.
Original task: {text}
Initial emotion-based urgency assessment: {emotion_urgency}
Please provide:
1. A clear, concise task title
2. Key objectives
3. Suggested deadline (if not specified)
4. Any important details or requirements
5. Urgency level assessment (choose one: normal, high, urgent) based on:
- Time-sensitive language (ASAP, immediately, urgent, etc.)
- Deadlines mentioned
- Impact and consequences described
- Business criticality
- Emotional context and tone
Format the response with "URGENCY_LEVEL: [level]" as the first line, followed by the structured description.
Consider the emotion-based urgency assessment provided above when making the final urgency determination.
"""
        headers = {
            'Authorization': f'Bearer {SAMBANOVA_API_KEY}',
            'Content-Type': 'application/json'
        }
        data = {
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'model': 'Meta-Llama-3.1-8B-Instruct',
            'max_tokens': 2000,
            'temperature': 0.7
        }
        response = requests.post(
            SAMBANOVA_URL,
            headers=headers,
            json=data,
            verify=False,  # SSL verification disabled; see warning suppression at the top
            timeout=620
        )
        if response.status_code != 200:
            raise Exception(f"SambaNova API request failed: {response.text}")
        response_text = response.json()['choices'][0]['message']['content']
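        # Per the prompt above, the reply should lead with a line such as:
        #   URGENCY_LEVEL: high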
        # Extract urgency level and description
        lines = response_text.split('\n')
        urgency_line = lines[0].strip()
        # Use emotion-based urgency as fallback
        urgency = emotion_urgency
        if urgency_line.startswith("URGENCY_LEVEL:"):
            level = urgency_line.split(":")[1].strip().lower()
            if level in ["normal", "high", "urgent"]:
                # Compare with emotion-based urgency and use the higher priority
                urgency_levels = {"normal": 0, "high": 1, "urgent": 2}
                if urgency_levels[level] > urgency_levels[emotion_urgency]:
                    urgency = level
            description = '\n'.join(lines[1:]).strip()
        else:
            description = response_text
        return description, urgency
    except Exception as e:
        raise Exception(f"Error improving task description: {str(e)}")
def create_trello_card(task_description, selected_members, location=None, urgency="normal"):
    """Create a Trello card with the improved task description"""
    try:
        boards = trello_client.list_boards()
        if not boards:
            raise Exception("No Trello boards found")
        board = boards[0]
        print(f"Using board: {board.name}")
        lists = board.list_lists()
        if not lists:
            raise Exception("No lists found in the board")
        todo_list = lists[0]
        print(f"Using list: {todo_list.name}")
        # Extract title and add timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        title = task_description.split('\n')[0]
        # Add urgency to title
        urgency_markers = {
            "normal": "📘",
            "high": "⚠️",
            "urgent": "🔴"
        }
        urgency_marker = urgency_markers.get(urgency.lower(), "📘")
        formatted_title = f"[{timestamp}] {urgency_marker} {title}"
        location_text = "Remote/Virtual"
        location_coords = None
        if location:
            location_text = location
        # Map urgency to status text
        urgency_status = {
            "normal": "Normal Priority",
            "high": "High Priority",
            "urgent": "URGENT"
        }
        status_text = urgency_status.get(urgency.lower(), "Normal Priority")
        formatted_description = f"""🎯 TASK DETAILS
------------------------
{task_description}

📋 METADATA
------------------------
🕒 Created: {timestamp}
🏷️ Source: TaskWhisper AI
⚡ Priority: {status_text}
📍 Location: {location_text}

✅ CHECKLIST
------------------------
- [ ] Task reviewed
- [ ] Requirements clear
- [ ] Timeline confirmed
- [ ] Resources identified

📝 NOTES
------------------------
Add your progress notes here...
"""
        card = todo_list.add_card(
            name=formatted_title,
            desc=formatted_description
        )
        if location_coords:
            card.set_pos(location_coords)
        # Add a label based on urgency
        available_labels = board.get_labels()
        urgency_colors = {
            "normal": "blue",
            "high": "yellow",
            "urgent": "red"
        }
        label_color = urgency_colors.get(urgency.lower(), "blue")
        # Find and add the appropriate label
        priority_label = next((label for label in available_labels if label.color == label_color), None)
        if priority_label:
            card.add_label(priority_label)
        else:
            print(f"Warning: {label_color} label not found on board")
        # Assign members to card
        if selected_members:
            for member_id in selected_members:
                try:
                    member = next((m for m in board.get_members() if m.id == member_id), None)
                    if member:
                        card.add_member(member)
                    else:
                        print(f"Warning: Member with ID {member_id} not found on board")
                except Exception as e:
                    print(f"Error adding member {member_id}: {str(e)}")
        return card.url
    except Exception as e:
        print(f"Trello card creation error details: {str(e)}")
        raise Exception(f"Error creating Trello card: {str(e)}")
def process_input(input_text, selected_members):
    """Process input text and create a Trello card"""
    try:
        # Improve the task description and get urgency
        improved_description, urgency = improve_task_description(input_text)
        # Create Trello card with detected urgency
        card_url = create_trello_card(improved_description, selected_members, urgency=urgency)
        # Get member names for display
        members_dict = get_trello_members()
        member_names = [name for name, mid in members_dict.items()
                        if mid in selected_members]
        urgency_emoji = {"normal": "📘", "high": "⚠️", "urgent": "🔴"}
        return f"""
Original Input:
--------------
{input_text}

Improved Task Description:
------------------------
{improved_description}

Task Created in Trello:
----------------------
Priority: {urgency_emoji.get(urgency, "📘")} {urgency.upper()}
Assigned to: {', '.join(member_names) if member_names else 'Not assigned'}
Card URL: {card_url}
"""
    except Exception as e:
        return f"Error processing input: {str(e)}"
def process_audio(audio_file, selected_members):
    """Process audio input and create a Trello card"""
    try:
        if audio_file is None:
            return "Error: No audio file provided"
        print(f"Audio file type: {type(audio_file)}")  # Debug print
        print(f"Audio file content: {audio_file}")  # Debug print
        text = transcribe_audio(audio_file)
        return process_input(text, selected_members)
    except Exception as e:
        print(f"Audio processing error details: {str(e)}")  # Debug print
        return f"Error processing audio: {str(e)}"
def process_audio_with_members(audio, selected_members):
    """Process audio with selected members"""
    try:
        if audio is None:
            return "Error: Please provide an audio input (record or upload)"
        print(f"Received audio input: {type(audio)}")
        print(f"Audio content: {audio}")
        # Convert selected member names to member IDs
        members_dict = get_trello_members()
        selected_member_ids = []
        for name in (selected_members or []):
            if name in members_dict:
                selected_member_ids.append(members_dict[name])
            else:
                print(f"Warning: Member {name} not found in members dictionary")
        try:
            result = process_audio(audio, selected_member_ids)
            return result
        except Exception as e:
            error_msg = str(e)
            if "Speech could not be understood" in error_msg:
                return "Could not understand the speech. Please try again with clearer audio."
            elif "Could not request results" in error_msg:
                return "Network error. Please check your internet connection and try again."
            else:
                return f"Error processing audio: {error_msg}"
    except Exception as e:
        print(f"Error in process_audio_with_members: {str(e)}")
        return f"Error processing audio with members: {str(e)}"
def process_text_with_members(text, selected_members):
    """Process text with selected members"""
    try:
        # Convert selected member names to member IDs
        members_dict = get_trello_members()
        # Debug prints
        print(f"Members dict: {members_dict}")
        print(f"Selected members: {selected_members}")
        selected_member_ids = []
        for name in (selected_members or []):
            if name in members_dict:
                selected_member_ids.append(members_dict[name])
            else:
                print(f"Warning: Member {name} not found in members dictionary")
        return process_input(text, selected_member_ids)
    except Exception as e:
        print(f"Error in process_text_with_members: {str(e)}")
        return f"Error processing text with members: {str(e)}"
# Create Gradio interface
with gr.Blocks(title="TaskWhisper - Smart Task Manager") as interface:
    gr.Markdown("# 🎙️ TaskWhisper - Smart Task Manager")
    gr.Markdown("Record audio or type your task. The AI will help improve and structure your task description.")
    # Get Trello members for the dropdowns
    members = get_trello_members()
    with gr.Tab("Audio Input"):
        audio_input = gr.Audio(
            label="Record or Upload Audio",
            sources=["microphone", "upload"],
            type="filepath",
            format="wav",
            interactive=True
        )
        gr.Markdown("""
        *Instructions:*
        - Use the microphone to record directly
        - Or upload an audio file (WAV format)
        - Speak clearly for better results
        - Keep background noise minimal
        """)
        member_dropdown_audio = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]
        )
        audio_button = gr.Button("Process Audio")
    with gr.Tab("Text Input"):
        text_input = gr.Textbox(
            lines=3,
            placeholder="Type your task here (e.g., 'Need to prepare quarterly report with sales data by next Friday')",
            label="Text Input"
        )
        member_dropdown_text = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]  # Initialize with empty selection
        )
        text_button = gr.Button("Process Text")
    output = gr.Textbox(
        label="Task Details",
        lines=15
    )
    # Set up event handlers
    audio_button.click(
        fn=process_audio_with_members,
        inputs=[audio_input, member_dropdown_audio],
        outputs=output
    )
    text_button.click(
        fn=process_text_with_members,
        inputs=[text_input, member_dropdown_text],
        outputs=output
    )
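# share=True additionally opens a temporary public *.gradio.live tunnel
# alongside the local server.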
if __name__ == "__main__":
    interface.launch(share=True)