mariasaif20 committed on
Commit
98bb2cc
·
verified ·
1 Parent(s): 7ca6a92

Upload 9 files

Files changed (9)
  1. AIEMOTBot.py +100 -0
  2. Dockerfile +75 -24
  3. compose.yaml +11 -0
  4. error.py +18 -0
  5. main.py +86 -0
  6. requirements.txt +21 -0
  7. style.css +15 -0
  8. utils.py +76 -0
  9. voicetoemotion.py +43 -0
AIEMOTBot.py ADDED
@@ -0,0 +1,100 @@
+ import os
+ import tempfile
+ import requests
+ import validators
+ import streamlit as st
+ from utils import setup_logging, log_error, detect_emotion_from_voice
+
+ # Custom CSS (the stylesheet shipped with this upload is style.css)
+ with open('style.css') as f:
+     css = f.read()
+
+ st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
+
+ # Setup Logging
+ setup_logging()
+
+ ## FUNCTIONS
+
+ # Save an uploaded voice file to a temporary .wav file and return its path
+ # (helper assumed: the upload called save_uploaded_voice without defining it)
+ def save_uploaded_voice(uploaded_voice):
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
+         tmp.write(uploaded_voice.getbuffer())
+         return tmp.name
+
+ # Define function for voicetoemotion page
+ def voicetoemotion():
+     st.title("Voice to Emotion")
+     st.subheader("Upload your voice file and detect emotions")
+
+     uploaded_voice = st.file_uploader("Upload a voice file", type=["wav"])
+
+     if uploaded_voice:
+         st.audio(uploaded_voice, format='audio/wav')
+         if st.button("Detect Emotion"):
+             with st.spinner("Detecting emotion..."):
+                 voice_path = save_uploaded_voice(uploaded_voice)
+                 emotion = detect_emotion_from_voice(voice_path)
+                 if emotion:
+                     st.success(f"Emotion detected: {emotion}")
+                 else:
+                     st.error("Failed to detect emotion.")
+
+ ## LOGO and TITLE
+ ## -------------------------------------------------------------------------------------------
+ # Show the logo and title side by side
+ col1, col2 = st.columns([1, 4])
+ with col1:
+     st.image("AIEMOTBOT.png", use_column_width=True)
+ with col2:
+     st.title("Hi, I am AIEMOTBOT - Your Emotional AI Assistant!")
+
+ # Main content
+ st.header("Upload your voice file and let me detect the emotion!")
+ st.subheader("Supported audio formats: WAV, MP3, etc.")
+
+ # Function to detect emotion from voice
+ def detect_emotion():
+     uploaded_file = st.file_uploader("Upload a voice file", type=["wav", "mp3"])
+     if uploaded_file:
+         st.audio(uploaded_file, format='audio/wav')
+         if st.button("Detect Emotion"):
+             with st.spinner("Detecting emotion..."):
+                 emotion = detect_emotion_from_voice(uploaded_file)
+                 if emotion:
+                     st.success(f"Emotion detected: {emotion}")
+                 else:
+                     st.error("Failed to detect emotion.")
+
+ # Call the function to detect emotion
+ detect_emotion()
+
+ ## WEBSITE LINK
+ ## -------------------------------------------------------------------------------------------
+ # Load the website content, then save it into a vector store, and enable the input field to
+ # ask a question
+ # (input widget and llm default assumed: the upload used both names without defining them)
+ website_link = st.text_input("Enter a website link to chat about")
+ llm = st.session_state.get('llm', 'default')
+
+ st.session_state['uploaded_link'] = False
+ if website_link:
+     # Ensure that the user has entered a correct URL
+     if validators.url(website_link):
+         try:
+             # Send a POST request to a FastAPI endpoint to scrape the webpage and load its
+             # text into a vector store
+             FASTAPI_URL = f"http://localhost:8000/load_link/{llm}"
+             data = {"website_link": website_link}
+             with st.spinner("Loading website..."):
+                 response = requests.post(FASTAPI_URL, json=data)
+             st.success(response.text)
+             st.session_state['current_website'] = website_link
+             st.session_state['uploaded_link'] = True
+             st.switch_page("pages/Web-chat.py")
+         except Exception as e:
+             log_error(str(e))
+             st.switch_page("pages/error.py")
+     else:
+         st.error("Invalid URL. Please enter a valid URL.")
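Note that the website-link block above posts to a /load_link/{llm} route that the uploaded main.py does not define. A minimal sketch of the route shape that call expects; the path and the website_link JSON key come from the front end, everything else is an assumption:

```python
# Hypothetical backend route matching the front end's request; not part of this upload.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class WebsiteLink(BaseModel):
    website_link: str

@app.post("/load_link/{llm}")
async def load_link(llm: str, link: WebsiteLink):
    # Scraping the page and loading its text into a vector store would go here.
    return f"Loaded {link.website_link} using {llm}"
```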
Dockerfile CHANGED
@@ -1,24 +1,75 @@
- FROM ghcr.io/livebook-dev/livebook:latest-cuda11.8
-
- ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE"
- ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME"
- ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev"
- ENV LIVEBOOK_WITHIN_IFRAME "true"
- ENV LIVEBOOK_APPS_PATH "/public-apps"
- ENV LIVEBOOK_APPS_PATH_WARMUP "manual"
- ENV LIVEBOOK_DATA_PATH "/data"
- ENV LIVEBOOK_PORT 7860
-
- EXPOSE 7860
-
- RUN mkdir -p /data
- RUN chmod 777 /data
-
- # The Space container runs with user ID 1000
- RUN useradd -m -u 1000 user
- ENV HOME=/home/user
-
- USER user
-
- COPY --chown=user public-apps/ /public-apps
- RUN /app/bin/warmup_apps
+ # syntax=docker/dockerfile:1
+
+ # Comments are provided throughout this file to help you get started.
+ # If you need more help, visit the Dockerfile reference guide at
+ # https://docs.docker.com/go/dockerfile-reference/
+
+ # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
+
+ ARG PYTHON_VERSION=3.11.9
+ FROM python:${PYTHON_VERSION}-slim as base
+
+ # Prevents Python from writing pyc files.
+ ENV PYTHONDONTWRITEBYTECODE=1
+
+ # Keeps Python from buffering stdout and stderr to avoid situations where
+ # the application crashes without emitting any logs due to buffering.
+ ENV PYTHONUNBUFFERED=1
+
+ # Set the LANGCHAIN_PROJECT environment variable
+ ENV LANGCHAIN_PROJECT="AIEMOTBot"
+
+ WORKDIR /app
+
+ # Create a non-privileged user that the app will run under.
+ # See https://docs.docker.com/go/dockerfile-user-best-practices/
+ ARG UID=10001
+ RUN adduser \
+     --disabled-password \
+     --gecos "" \
+     --home "/nonexistent" \
+     --shell "/sbin/nologin" \
+     --no-create-home \
+     --uid "${UID}" \
+     appuser
+
+ # Download dependencies as a separate step to take advantage of Docker's caching.
+ # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
+ # Leverage a bind mount to requirements.txt to avoid having to copy it
+ # into this layer.
+ RUN --mount=type=cache,target=/root/.cache/pip \
+     --mount=type=bind,source=requirements.txt,target=requirements.txt \
+     python -m pip install -r requirements.txt
+
+ # Create a directory named 'data' and assign its ownership to appuser
+ RUN mkdir -p /data
+ RUN chown appuser /data
+
+ # Create a directory named 'images' and assign its ownership to appuser
+ RUN mkdir -p /images
+ RUN chown appuser /images
+
+ # Create the app.log file
+ RUN touch app.log
+
+ # Assign the ownership of app.log to appuser
+ RUN chown appuser app.log
+
+ # Switch to the non-privileged user to run the application.
+ USER appuser
+
+ # Set the TRANSFORMERS_CACHE environment variable
+ ENV TRANSFORMERS_CACHE=/tmp/.cache/huggingface
+
+ # Create the cache folder with appropriate permissions
+ RUN mkdir -p $TRANSFORMERS_CACHE && chmod -R 777 $TRANSFORMERS_CACHE
+
+ # Copy the source code into the container.
+ COPY . .
+
+ # Expose the port that the application listens on.
+ EXPOSE 8000
+
+ # Run the application.
+ # CMD uvicorn 'main:app' --host=0.0.0.0 --port=8000
+ CMD ["bash", "-c", "uvicorn main:app --host 0.0.0.0 --port 8000 & streamlit run AIEMOTBot.py --server.port 8501"]
compose.yaml ADDED
@@ -0,0 +1,11 @@
+ version: '3.8'
+
+ # Define services for your application
+ services:
+   aiemotbot:
+     # Build the service from the Dockerfile in the current directory
+     build:
+       context: .
+     # Expose port 8000 on the container to port 8000 on the host
+     ports:
+       - "8000:8000"
error.py ADDED
@@ -0,0 +1,18 @@
+ import streamlit as st
+
+ # Custom CSS
+ with open('style.css') as f:
+     css = f.read()
+
+ st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
+
+ ## LOGO and TITLE
+ ## -------------------------------------------------------------------------------------------
+ # Show the logo and title side by side
+ col1, col2 = st.columns([1, 4])
+ with col1:
+     st.image("AIEMOTBOT.png", width=100)
+ with col2:
+     st.title("Error")
+
+ st.error("Oops - Something went wrong! Please try again.")
main.py ADDED
@@ -0,0 +1,86 @@
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from contextlib import asynccontextmanager
+ from dotenv import load_dotenv
+ import base64
+ import requests
+ import os
+
+ ## APPLICATION LIFESPAN
+ # Load the environment variables using the FastAPI lifespan event so that they are available throughout the application
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     # Load the environment variables
+     load_dotenv()
+     yield
+
+ ## FASTAPI APP
+ # Initialize the FastAPI app
+ app = FastAPI(lifespan=lifespan)
+
+ ## PYDANTIC MODELS
+ # Define a Voice Pydantic model for the request body
+ class Voice(BaseModel):
+     audio_content: str
+
+ ## FUNCTIONS
+ # Function to encode the audio
+ def encode_audio(audio_content):
+     return base64.b64encode(audio_content.encode()).decode('utf-8')
+
+ # Function to detect emotion and generate emojis
+ def detect_emotion_and_generate_emoji(audio_content):
+     try:
+         # Get the base64 string
+         base64_audio = encode_audio(audio_content)
+
+         # Make a request to the emotion detection API
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {os.environ['EMOTION_API_KEY']}"
+         }
+
+         payload = {
+             "audio_content": base64_audio
+         }
+
+         response = requests.post("https://api.emotion-analysis.com/detect", headers=headers, json=payload)
+         response_data = response.json()
+
+         # Process the emotion data and generate emojis
+         # Assuming the response_data contains the detected emotion (e.g., "happy", "sad", "angry", etc.)
+         # You would write logic here to map emotions to emojis
+
+         # For demonstration, let's assume we have a function to generate emojis based on detected emotion
+         emojis = generate_emojis(response_data['emotion'])
+
+         return emojis
+     except Exception as e:
+         # Handle errors
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # Function to generate emojis based on detected emotion
+ def generate_emojis(emotion):
+     # This is just a placeholder function
+     # You would replace this with your actual logic to generate emojis based on the detected emotion
+     if emotion == "happy":
+         return "😊😄🥳"
+     elif emotion == "sad":
+         return "😒😔😞"
+     elif emotion == "angry":
+         return "😡😤🤬"
+     else:
+         return "😐🤔😶"
+
+ ## FASTAPI ENDPOINTS
+ ## POST - /detect_emotion
+ # Detect emotion from voice content and generate emojis
+ @app.post("/detect_emotion")
+ async def detect_emotion(voice: Voice):
+     try:
+         # Call the function to detect emotion and generate emojis
+         emojis = detect_emotion_and_generate_emoji(voice.audio_content)
+         return emojis
+     except Exception as e:
+         # Handle errors
+         raise HTTPException(status_code=500, detail=str(e))
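Since Voice.audio_content is a string that the endpoint base64-encodes itself, a client sends the audio as text. A minimal sketch of calling the endpoint, assuming the API runs locally on port 8000 and sample.wav is a hypothetical test file:

```python
# Hypothetical client for POST /detect_emotion; sample.wav is a placeholder file name.
import requests

with open("sample.wav", "rb") as f:
    audio_content = f.read().decode("latin-1")  # endpoint expects a JSON string field

response = requests.post(
    "http://localhost:8000/detect_emotion",
    json={"audio_content": audio_content},
)
print(response.status_code, response.json())  # e.g. 200 '😊😄🥳'
```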
requirements.txt ADDED
@@ -0,0 +1,21 @@
+ bs4
+ docx2txt
+ faiss-cpu
+ fastapi
+ langchain
+ langchain-community
+ langchain-core
+ langchain-groq
+ langchain-openai
+ Pillow
+ pydantic
+ pypdf2
+ python-dotenv
+ python-pptx
+ requests
+ sentence-transformers
+ SpeechRecognition  # imported by voicetoemotion.py
+ streamlit
+ textblob  # imported by voicetoemotion.py
+ validators
+ uvicorn
style.css ADDED
@@ -0,0 +1,15 @@
+ /* Set border radius for images */
+ img {
+     border-radius: 10px;
+ }
+
+ /* Set background gradient for the entire app */
+ .stApp {
+     background: linear-gradient(to bottom, rgba(255, 165, 0, 1) 0%, rgba(255, 192, 203, 1) 100%);
+     /* You can adjust the colors as per your preference */
+ }
+
+ /* Hide the second item in unordered lists */
+ ul li:nth-child(2) {
+     display: none;
+ }
utils.py ADDED
@@ -0,0 +1,76 @@
+ import re
+ import os
+ import logging
+ import requests
+
+ ## LOGGING CONFIGURATION
+ ## -------------------------------------------------------------------------------------------
+ # Configure logging to write to a file; defined at module level so other pages can simply
+ # do `from utils import setup_logging, log_error`
+ def setup_logging():
+     logging.basicConfig(filename='app.log', level=logging.ERROR)
+
+ def log_error(message):
+     logging.error(message)
+
+ ## EMOTION DETECTION CLIENT
+ ## -------------------------------------------------------------------------------------------
+ # Helper assumed: AIEMOTBot.py imports detect_emotion_from_voice from this module, but the
+ # upload did not define it. This sketch forwards the audio to the local FastAPI
+ # /detect_emotion endpoint defined in main.py and returns the emojis, or None on failure.
+ def detect_emotion_from_voice(audio_file):
+     try:
+         if isinstance(audio_file, str):
+             # Called with a file path
+             with open(audio_file, 'rb') as f:
+                 audio_bytes = f.read()
+         else:
+             # Called with a Streamlit UploadedFile
+             audio_bytes = audio_file.read()
+         payload = {"audio_content": audio_bytes.decode('latin-1')}
+         response = requests.post("http://localhost:8000/detect_emotion", json=payload)
+         response.raise_for_status()
+         return response.json()
+     except Exception as e:
+         log_error(str(e))
+         return None
+
+ class AIEMOTBot:
+     def __init__(self):
+         setup_logging()
+
+     ## HELPER FUNCTIONS
+     ## ------------------------------------------------------------------------------------------
+     # Function to format response received from a FastAPI endpoint
+     @staticmethod
+     def format_response(response_text):
+         # Replace literal \n sequences with real newlines for markdown
+         response_text = re.sub(r'\\n', '\n', response_text)
+
+         # Check for bullet points and replace with markdown syntax
+         response_text = re.sub(r'^\s*-\s+(.*)$', r'* \1', response_text, flags=re.MULTILINE)
+
+         # Check for numbered lists and replace with markdown syntax
+         response_text = re.sub(r'^\s*\d+\.\s+(.*)$', r'1. \1', response_text, flags=re.MULTILINE)
+
+         # Check for headings and replace with markdown syntax
+         response_text = re.sub(r'^\s*(#+)\s+(.*)$', r'\1 \2', response_text, flags=re.MULTILINE)
+
+         return response_text
+
+     # Function to unlink all images when the application closes
+     @staticmethod
+     def unlink_images(folder_path):
+         # List all files in the folder
+         image_files = os.listdir(folder_path)
+
+         # Iterate over image files and unlink them
+         for image_file in image_files:
+             try:
+                 os.unlink(os.path.join(folder_path, image_file))
+                 print(f"Deleted: {image_file}")
+             except Exception as e:
+                 print(f"Error deleting {image_file}: {e}")
+
+ if __name__ == "__main__":
+     bot = AIEMOTBot()
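A quick check of format_response with a made-up backend string shows the escaped-newline and bullet handling:

```python
# Demonstration only; the input string is invented.
from utils import AIEMOTBot

raw = "Summary:\\n- first point\\n- second point"
print(AIEMOTBot.format_response(raw))
# Summary:
# * first point
# * second point
```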
voicetoemotion.py ADDED
@@ -0,0 +1,43 @@
+ import streamlit as st
+ import speech_recognition as sr
+ from textblob import TextBlob
+
+ # Initialize Speech Recognition
+ recognizer = sr.Recognizer()
+
+ # Function to analyze sentiment and generate emoji
+ def analyze_sentiment(text):
+     blob = TextBlob(text)
+     sentiment_score = blob.sentiment.polarity
+     if sentiment_score > 0.5:
+         emoji = "😊"
+     elif sentiment_score < -0.5:
+         emoji = "😞"
+     else:
+         emoji = "😐"
+     return emoji
+
+ # Streamlit app
+ st.title("Voice-to-Text Chatbot")
+
+ # Record voice input
+ with st.echo():
+     st.write("Click the button and speak...")
+     try:
+         with sr.Microphone() as source:
+             st.write("Recording...")
+             audio_data = recognizer.record(source, duration=5)  # Adjust duration as needed
+             st.write("Processing...")
+
+         # Convert speech to text
+         text = recognizer.recognize_google(audio_data)
+         st.write(f"You said: {text}")
+
+         # Analyze sentiment and generate emoji
+         emoji = analyze_sentiment(text)
+         st.write(f"Sentiment: {emoji}")
+
+     except sr.RequestError as e:
+         st.error("Could not request results; {0}".format(e))
+     except sr.UnknownValueError:
+         st.error("Could not understand audio")