# EchoTasks / app.py
# (Hugging Face file-viewer chrome captured with the source — preserved as a
# comment so the module remains valid Python.)
# Pamudu13's picture
# Update app.py
# 631135b verified
# raw | history | blame
# 14.7 kB
import gradio as gr
import os
import base64
from datetime import datetime
import requests
import trello
from dotenv import load_dotenv
import urllib3
import wave
import audioop
import io
import speech_recognition as sr
# Disable SSL warning (the SambaNova request below is made with verify=False)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Load environment variables from .env file
load_dotenv()

# SambaNova API configuration.
# SECURITY: this key was previously hardcoded in source. Read it from the
# environment; the old literal is kept only as a backward-compatible fallback.
# Rotate the key and delete the fallback as soon as possible.
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY", "34115dcb-baab-4390-ab5c-e501666f9f4e")
SAMBANOVA_URL = "https://api.sambanova.ai/v1/chat/completions"

# Initialize Trello client from environment credentials
trello_client = trello.TrelloClient(
    api_key=os.getenv('TRELLO_API_KEY'),
    token=os.getenv('TRELLO_TOKEN')
)
def get_trello_members():
    """Return a mapping of member display name -> member id for the first board.

    Falls back to the username when a member has no full name. On any failure
    the error is logged and an empty dict is returned so the UI still loads.
    """
    try:
        all_boards = trello_client.list_boards()
        if not all_boards:
            raise Exception("No Trello boards found")
        workspace_board = all_boards[0]
        name_to_id = {}
        for member in workspace_board.get_members():
            display_name = member.full_name or member.username
            name_to_id[display_name] = member.id
        return name_to_id
    except Exception as e:
        print(f"Error fetching Trello members: {str(e)}")
        return {}
def process_audio_data(audio_path):
    """Load a WAV file and normalize it to 16-bit mono PCM at 16 kHz.

    Returns a (raw PCM bytes, sample rate) tuple. Any error is logged and
    re-raised for the caller to handle.
    """
    try:
        with wave.open(audio_path, 'rb') as wav_file:
            channels = wav_file.getnchannels()
            width = wav_file.getsampwidth()
            rate = wav_file.getframerate()
            pcm = wav_file.readframes(wav_file.getnframes())

        # Downmix stereo to mono (average both channels equally).
        if channels == 2:
            pcm = audioop.tomono(pcm, width, 1, 1)
        # Widen/narrow samples to 16-bit.
        if width != 2:
            pcm = audioop.lin2lin(pcm, width, 2)
        # Resample to 16 kHz only when needed.
        if rate != 16000:
            pcm, _ = audioop.ratecv(pcm, 2, 1, rate, 16000, None)
            rate = 16000

        return pcm, rate
    except Exception as e:
        print(f"Error processing audio: {str(e)}")
        raise
def transcribe_audio(audio_file):
    """Transcribe speech to text using Google Speech Recognition.

    Accepts either a file path or a (path, ...) tuple as produced by Gradio.
    Raises Exception with a user-readable message on any failure.
    """
    try:
        recognizer = sr.Recognizer()

        # Gradio may hand us a tuple; the first element is the file path.
        audio_path = audio_file[0] if isinstance(audio_file, tuple) else audio_file
        print(f"Processing audio file: {audio_path}")

        try:
            with sr.AudioFile(audio_path) as source:
                # Calibrate against background noise before recording.
                recognizer.adjust_for_ambient_noise(source)
                audio_data = recognizer.record(source)
                # Run the actual transcription.
                text = recognizer.recognize_google(
                    audio_data,
                    language='en-US',
                    show_all=False,
                    with_confidence=False
                )
                if not text:
                    raise Exception("No transcription results returned")
                return text.strip()
        except sr.UnknownValueError:
            raise Exception("Speech could not be understood. Please try speaking more clearly.")
        except sr.RequestError as e:
            raise Exception(f"Could not request results from Google Speech Recognition service; {e}")
    except Exception as e:
        print(f"Transcription error details: {str(e)}")
        raise Exception(f"Transcription error: {str(e)}")
def improve_task_description(text):
    """Ask the SambaNova LLM to restructure a raw task description.

    Returns the model's improved text. Raises Exception on any API failure.
    """
    try:
        prompt = f"""Please improve and structure this task description for better clarity and actionability:
Original task: {text}
Please provide:
1. A clear, concise task title
2. Key objectives
3. Suggested deadline (if not specified)
4. Any important details or requirements
"""
        headers = {
            'Authorization': f'Bearer {SAMBANOVA_API_KEY}',
            'Content-Type': 'application/json'
        }
        payload = {
            'messages': [{'role': 'user', 'content': prompt}],
            'model': 'Meta-Llama-3.1-8B-Instruct',
            'max_tokens': 2000,
            'temperature': 0.7
        }

        # NOTE(review): verify=False disables TLS certificate checking — kept to
        # preserve existing behavior, but it should be re-enabled.
        response = requests.post(
            SAMBANOVA_URL,
            headers=headers,
            json=payload,
            verify=False,
            timeout=620
        )
        if response.status_code != 200:
            raise Exception(f"SambaNova API request failed: {response.text}")

        return response.json()['choices'][0]['message']['content']
    except Exception as e:
        raise Exception(f"Error improving task description: {str(e)}")
def create_trello_card(task_description, selected_members, location=None, urgency="normal"):
    """Create a Trello card with the improved task description.

    Args:
        task_description: Full (AI-improved) task text; its first line becomes
            the card title.
        selected_members: Iterable of Trello member ids to assign to the card.
        location: Optional location text; defaults to "Remote/Virtual" when falsy.
        urgency: "normal" / "high" / "urgent" (case-insensitive); anything else
            falls back to normal priority.

    Returns:
        The URL of the created card.

    Raises:
        Exception: If no board/list is available or the Trello API fails.
    """
    try:
        # Cards always go to the first list of the first board.
        boards = trello_client.list_boards()
        if not boards:
            raise Exception("No Trello boards found")
        board = boards[0]
        print(f"Using board: {board.name}")
        lists = board.list_lists()
        if not lists:
            raise Exception("No lists found in the board")
        todo_list = lists[0]
        print(f"Using list: {todo_list.name}")
        # Extract title and add timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        title = task_description.split('\n')[0]
        # Add urgency marker emoji to title
        urgency_markers = {
            "normal": "๐Ÿ“˜",
            "high": "โš ๏ธ",
            "urgent": "๐Ÿ”ด"
        }
        urgency_marker = urgency_markers.get(urgency.lower(), "๐Ÿ“˜")
        formatted_title = f"[{timestamp}] {urgency_marker} {title}"
        location_text = "Remote/Virtual"
        # NOTE(review): location_coords is never set anywhere, so the
        # set_pos branch below is effectively dead code.
        location_coords = None
        if location:
            location_text = location
        # Map urgency to status text
        urgency_status = {
            "normal": "Normal Priority",
            "high": "High Priority",
            "urgent": "URGENT"
        }
        status_text = urgency_status.get(urgency.lower(), "Normal Priority")
        # Card body: task text + metadata + a fixed review checklist.
        formatted_description = f"""๐ŸŽฏ TASK DETAILS
------------------------
{task_description}
๐Ÿ“‹ METADATA
------------------------
๐Ÿ•’ Created: {timestamp}
๐Ÿท๏ธ Source: TaskWhisper AI
โšก Priority: {status_text}
๐Ÿ“ Location: {location_text}
โœ… CHECKLIST
------------------------
- [ ] Task reviewed
- [ ] Requirements clear
- [ ] Timeline confirmed
- [ ] Resources identified
๐Ÿ“ NOTES
------------------------
Add your progress notes here...
"""
        card = todo_list.add_card(
            name=formatted_title,
            desc=formatted_description
        )
        if location_coords:
            card.set_pos(location_coords)
        # Add a color label matching the urgency, if the board defines one.
        available_labels = board.get_labels()
        urgency_colors = {
            "normal": "blue",
            "high": "yellow",
            "urgent": "red"
        }
        label_color = urgency_colors.get(urgency.lower(), "blue")
        # Find and add the appropriate label
        priority_label = next((label for label in available_labels if label.color == label_color), None)
        if priority_label:
            card.add_label(priority_label)
        else:
            print(f"Warning: {label_color} label not found on board")
        # Assign members to card; each failure is logged but non-fatal.
        if selected_members:
            for member_id in selected_members:
                try:
                    member = next((m for m in board.get_members() if m.id == member_id), None)
                    if member:
                        card.add_member(member)
                    else:
                        print(f"Warning: Member with ID {member_id} not found on board")
                except Exception as e:
                    print(f"Error adding member {member_id}: {str(e)}")
        return card.url
    except Exception as e:
        print(f"Trello card creation error details: {str(e)}")
        raise Exception(f"Error creating Trello card: {str(e)}")
def process_input(input_text, selected_members):
    """Improve a task description, file it as a Trello card, and return a summary.

    On any failure, returns an error string instead of raising.
    """
    try:
        improved_description = improve_task_description(input_text)
        card_url = create_trello_card(improved_description, selected_members)

        # Resolve the assigned member ids back to display names for the summary.
        members_dict = get_trello_members()
        member_names = [
            name for name, member_id in members_dict.items()
            if member_id in selected_members
        ]
        assigned = ', '.join(member_names) if member_names else 'Not assigned'

        return f"""
Original Input:
--------------
{input_text}
Improved Task Description:
------------------------
{improved_description}
Task Created in Trello:
----------------------
Assigned to: {assigned}
Card URL: {card_url}
"""
    except Exception as e:
        return f"Error processing input: {str(e)}"
def process_audio(audio_file, selected_members):
    """Transcribe an audio file and create a Trello card from the transcript.

    Returns the task summary string, or an error string on failure.
    """
    try:
        if audio_file is None:
            return "Error: No audio file or text provided"

        print(f"Audio file type: {type(audio_file)}")  # Debug print
        print(f"Audio file content: {audio_file}")  # Debug print

        transcript = transcribe_audio(audio_file)
        return process_input(transcript, selected_members)
    except Exception as e:
        print(f"Audio processing error details: {str(e)}")  # Debug print
        return f"Error processing audio: {str(e)}"
def process_audio_with_members(audio, selected_members):
    """Resolve member names to Trello ids, then process the audio task.

    Translates known transcription failures into friendlier messages.
    """
    try:
        if audio is None:
            return "Error: Please provide an audio input (record or upload)"

        print(f"Received audio input: {type(audio)}")
        print(f"Audio content: {audio}")

        # Map the dropdown's display names back to Trello member ids.
        members_dict = get_trello_members()
        member_ids = []
        for member_name in (selected_members or []):
            if member_name not in members_dict:
                print(f"Warning: Member {member_name} not found in members dictionary")
            else:
                member_ids.append(members_dict[member_name])

        try:
            return process_audio(audio, member_ids)
        except Exception as e:
            error_msg = str(e)
            if "Speech could not be understood" in error_msg:
                return "Could not understand the speech. Please try again with clearer audio."
            if "Could not request results" in error_msg:
                return "Network error. Please check your internet connection and try again."
            return f"Error processing audio: {error_msg}"
    except Exception as e:
        print(f"Error in process_audio_with_members: {str(e)}")
        return f"Error processing audio with members: {str(e)}"
def process_text_with_members(text, selected_members):
    """Resolve member names to Trello ids, then process the text task."""
    try:
        # Map the dropdown's display names back to Trello member ids.
        members_dict = get_trello_members()
        # Debug prints
        print(f"Members dict: {members_dict}")
        print(f"Selected members: {selected_members}")

        member_ids = []
        for member_name in (selected_members or []):
            if member_name not in members_dict:
                print(f"Warning: Member {member_name} not found in members dictionary")
            else:
                member_ids.append(members_dict[member_name])

        return process_input(text, member_ids)
    except Exception as e:
        print(f"Error in process_text_with_members: {str(e)}")
        return f"Error processing text with members: {str(e)}"
# Create Gradio interface: two input tabs (audio / text), a shared member
# picker per tab, and one shared output textbox.
with gr.Blocks(title="TaskWhisper - Smart Task Manager") as interface:
    gr.Markdown("# ๐ŸŽ™๏ธ TaskWhisper - Smart Task Manager")
    gr.Markdown("Record audio or type your task. The AI will help improve and structure your task description.")
    # Get Trello members for the dropdown (name -> id; dropdowns show names).
    members = get_trello_members()
    with gr.Tab("Audio Input"):
        audio_input = gr.Audio(
            label="Record or Upload Audio",
            sources=["microphone", "upload"],
            type="filepath",  # handler receives a path on disk, not raw samples
            format="wav",
            interactive=True
        )
        gr.Markdown("""
*Instructions:*
- Use microphone to record directly
- Or upload an audio file (WAV format)
- Speak clearly for better results
- Keep background noise minimal
""")
        member_dropdown_audio = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]
        )
        audio_button = gr.Button("Process Audio")
    with gr.Tab("Text Input"):
        text_input = gr.Textbox(
            lines=3,
            placeholder="Type your task here (e.g., 'Need to prepare quarterly report with sales data by next Friday')",
            label="Text Input"
        )
        member_dropdown_text = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]  # Initialize with empty selection
        )
        text_button = gr.Button("Process Text")
    # Single output box shared by both tabs.
    output = gr.Textbox(
        label="Task Details",
        lines=15
    )
    # Set up event handlers
    audio_button.click(
        fn=process_audio_with_members,
        inputs=[audio_input, member_dropdown_audio],
        outputs=output
    )
    text_button.click(
        fn=process_text_with_members,
        inputs=[text_input, member_dropdown_text],
        outputs=output
    )
# Launch the app with a public share link when run as a script.
if __name__ == "__main__":
    interface.launch(share=True)