app ready
Files changed:
- .gitignore +5 -0
- app.py +53 -0
- requirements.txt +5 -0
- src/__init__.py +0 -0
- src/config.py +32 -0
- src/exception.py +11 -0
- src/llm.py +79 -0
- src/log.py +18 -0
- src/utils.py +37 -0
.gitignore
ADDED
@@ -0,0 +1,5 @@
+.env
+venv/
+__pycache__
+docs
+logs/application.log
app.py
ADDED
@@ -0,0 +1,53 @@
+import streamlit as st
+from src import utils
+from src.llm import EmotionalChatbot
+from src.log import logger
+
+# Streamlit UI Setup
+st.set_page_config(
+    page_title="Emotional Intelligence Bot",
+    page_icon="🤖",
+    layout="wide"
+)
+
+# Initialize session state for chat history
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = []
+
+try:
+    # Initialize the chatbot
+    chatbot = EmotionalChatbot(chat_history=st.session_state.chat_history)
+except Exception as e:
+    logger.critical("Failed to initialize the chatbot", exc_info=True)
+    st.error("Unable to initialize the bot. Check logs for more details.")
+    st.stop()  # halt the script here; otherwise `chatbot` is undefined below
+
+st.markdown(utils.styles(), unsafe_allow_html=True)
+
+st.header(":rainbow[Lumina] - :blue[EI Bot] 🤗")
+
+st.sidebar.markdown(utils.sidebar_markdown())
+
+# Display chat history
+for message in st.session_state.chat_history:
+    for role, content in message.items():
+        with st.chat_message(role):
+            st.write(content)
+
+# User input
+user_input = st.chat_input("Chat With Lumina...")
+if user_input:
+    with st.chat_message("user"):
+        st.write(user_input)
+
+    # Process the user input and generate bot response
+    try:
+        with st.spinner("Thinking..."):
+            response = chatbot.generate_response(user_input)
+            message = {'human': user_input, 'AI': response}
+            st.session_state.chat_history.append(message)
+            with st.chat_message("assistant"):
+                st.write(response)
+    except Exception as e:
+        logger.error(f"Error generating bot response: {e}")
+        st.error("An error occurred. Check logs for more details.")
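A detail worth noting in the replay loop: stored turns use the keys 'human' and 'AI', while live turns render with Streamlit's built-in 'user' and 'assistant' roles, so replayed messages may pick up different avatars and styling. A hypothetical normalization (ROLE_MAP is not in the commit; it is an illustrative fix):

# Hypothetical: map stored history keys onto Streamlit's preset roles
ROLE_MAP = {"human": "user", "AI": "assistant"}
for message in st.session_state.chat_history:
    for role, content in message.items():
        with st.chat_message(ROLE_MAP.get(role, role)):
            st.write(content)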
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+accelerate
+langchain
+langchain-groq
+python-dotenv
+streamlit
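All five dependencies are unpinned, so a fresh environment pulls the latest releases; the usual workflow is to install them into the git-ignored venv/ with pip install -r requirements.txt and launch the UI with streamlit run app.py. Note that accelerate is never imported anywhere in this commit; it looks like a leftover from the commented-out Hugging Face experiments in src/config.py.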
src/__init__.py
ADDED
File without changes
src/config.py
ADDED
@@ -0,0 +1,32 @@
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+class AppConfig:
+
+    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+
+    GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+    HF_TOKEN = os.getenv("HF_TOKEN")
+
+    # SENTI_API_URL = "https://api-inference.huggingface.co/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english"
+    # EMOTION_API_URL = "https://api-inference.huggingface.co/models/bhadresh-savani/distilbert-base-uncased-emotion"
+
+    SYSTEM_PROMPT = (
+        "You are a compassionate and emotionally intelligent AI Assistant. "
+        "Your name is Lumina, and you are an emotionally supportive friend. "
+        "Your role is to actively listen, understand needs, and respond with kindness, empathy, and positivity. "
+        "Respond precisely and clearly (max 20 words for concise responses). "
+        "For detailed empathetic responses, use no more than 30 words."
+    )
+
+    LLM_CONFIG = {
+        "model": "llama3-70b-8192",
+        "api_key": GROQ_API_KEY,
+        "temperature": 0.5,
+        "max_retries": 2,
+        "streaming": True
+    }
+
+    MEMORY_WINDOW = 20
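load_dotenv() reads the two secrets from a local .env file, which the .gitignore above keeps out of version control. A minimal .env sketch, with placeholder values:

GROQ_API_KEY=gsk_your_groq_key_here
HF_TOKEN=hf_your_huggingface_token_here

Only GROQ_API_KEY is actually consumed in this commit (through LLM_CONFIG); HF_TOKEN is loaded but sits unused while the Hugging Face sentiment/emotion endpoints remain commented out.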
src/exception.py
ADDED
@@ -0,0 +1,11 @@
+class ChatbotException(Exception):
+    """Base exception for chatbot-related errors."""
+    pass
+
+class LLMLoadingError(ChatbotException):
+    """Exception raised for issues while loading the LLM."""
+    pass
+
+class ResponseGenerationError(ChatbotException):
+    """Exception raised for errors in response generation."""
+    pass
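Nothing in this commit raises these exceptions yet; src/llm.py and app.py let raw errors propagate and catch bare Exception. A hypothetical sketch of the intended wiring, translating an init failure into the custom hierarchy:

# Hypothetical usage; the commit itself does not raise these yet
from src.exception import ChatbotException, LLMLoadingError
from src.llm import EmotionalChatbot

try:
    chatbot = EmotionalChatbot()
except Exception as e:
    raise LLMLoadingError("Could not initialize the Groq LLM") from e

app.py could then catch ChatbotException instead of bare Exception and still cover both subclasses.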
src/llm.py
ADDED
@@ -0,0 +1,79 @@
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain_groq import ChatGroq
+from langchain.chains import LLMChain
+from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
+from langchain_core.messages import SystemMessage
+from src.config import AppConfig
+
+class EmotionalChatbot:
+    def __init__(self, chat_history=None):
+        """
+        Initialize the chatbot with Memory and Groq integration.
+        :param chat_history: Existing chat history to load into memory.
+        """
+        self.memory = ConversationBufferWindowMemory(
+            k=AppConfig.MEMORY_WINDOW,
+            memory_key="chat_history",
+            return_messages=True
+        )
+
+        # Load existing chat history into memory if available
+        if chat_history:
+            for message in chat_history:
+                try:
+                    self.memory.save_context(
+                        {"input": message["human"]},
+                        {"output": message["AI"]}
+                    )
+                except KeyError:
+                    print(f"Skipping invalid message: {message}")
+
+        self.default_max_tokens = 100
+
+        # Initialize ChatGroq with default max_tokens
+        self.groq_chat = ChatGroq(**AppConfig.LLM_CONFIG, max_tokens=self.default_max_tokens)
+
+        self.prompt_template = ChatPromptTemplate.from_messages(
+            [
+                SystemMessage(content=AppConfig.SYSTEM_PROMPT),
+                MessagesPlaceholder(variable_name="chat_history"),
+                HumanMessagePromptTemplate.from_template("{human_input}")
+            ]
+        )
+
+        self.conversation = LLMChain(
+            llm=self.groq_chat,
+            prompt=self.prompt_template,
+            memory=self.memory,
+            verbose=True
+        )
+
+    def adjust_max_tokens(self, user_input):
+        """
+        Dynamically adjust the max_tokens based on the user's input complexity.
+        """
+        input_length = len(user_input.split())
+
+        if any(keyword in user_input.lower() for keyword in [
+            "explain", "detail", "elaborate", "describe", "clarify", "expand",
+            "analyze", "break down", "in-depth", "why", "how", "discuss",
+            "thorough", "comprehensive", "what do you mean"
+        ]) or input_length >= 15:
+            return self.default_max_tokens  # More tokens for detailed queries
+        else:  # Moderate queries
+            return 50  # Balanced token usage
+
+    def generate_response(self, user_input):
+        """
+        Generate a response using the LLM, with dynamically adjusted max_tokens.
+        :param user_input: The user's message.
+        :return: The AI's response.
+        """
+        # Adjust max_tokens dynamically
+        max_tokens = self.adjust_max_tokens(user_input)
+
+        # Update the LLM configuration dynamically
+        self.groq_chat.max_tokens = max_tokens
+
+        # Generate the response
+        return self.conversation.predict(human_input=user_input)
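A minimal smoke test outside Streamlit, assuming GROQ_API_KEY is set in .env (the script itself is illustrative, not part of the commit):

from src.llm import EmotionalChatbot

bot = EmotionalChatbot(chat_history=[{"human": "Hi!", "AI": "Hello, friend!"}])

# The heuristic: 100 tokens for keyword or 15+ word queries, 50 otherwise
assert bot.adjust_max_tokens("I feel sad") == 50
assert bot.adjust_max_tokens("Please explain what burnout is") == 100

print(bot.generate_response("I had a rough day at work."))  # live Groq call

One quirk of the heuristic: keyword in user_input.lower() is a substring test, so an input like "show me" also matches "how" and gets the full 100-token budget.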
src/log.py
ADDED
@@ -0,0 +1,18 @@
+import os
+import logging
+from src.config import AppConfig
+
+
+LOGS_DIR = os.path.join(AppConfig.BASE_DIR, "logs")
+os.makedirs(LOGS_DIR, exist_ok=True)
+
+# Configure logging
+logger = logging.getLogger("EmotionalIntelligenceBot")
+logger.setLevel(logging.INFO)
+
+log_file_path = os.path.join(LOGS_DIR, "application.log")
+
+handler = logging.FileHandler(log_file_path)
+formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+handler.setFormatter(formatter)
+logger.addHandler(handler)
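Because this module configures everything at import time, the logs/ directory is created the first time any module runs from src.log import logger. Typical call sites, matching what app.py already does:

from src.log import logger

logger.info("Chatbot session started")
logger.error("Response generation failed", exc_info=True)

Records are appended to logs/application.log (git-ignored above) in the asctime - name - levelname - message layout defined by the formatter.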
src/utils.py
ADDED
@@ -0,0 +1,37 @@
+def styles():
+    css = """
+    <style>
+
+    </style>
+    """
+    return css
+
+def sidebar_markdown():
+    sidebar_md = """
+    # Welcome to Lumina 🤖
+
+    ---
+
+    **Who is Lumina?**
+    Lumina is an emotional intelligence chatbot designed to:
+    - 🧠 Understand your emotions.
+    - ❤️ Provide compassionate responses.
+    - 🌈 Help you feel supported and motivated.
+
+    ---
+
+    **💡 How to use:**
+    1. Share your thoughts or ask a question.
+    2. Lumina will respond with insight and empathy.
+    3. Explore any topic—Lumina is here for you.
+
+    ---
+
+    🎯 **Pro Tip:**
+    The more you engage, the better Lumina understands you. Use this space to express yourself.
+
+    ---
+
+    🌟 **Let Lumina bring light to your day!**
+    """
+    return sidebar_md
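styles() currently injects an empty <style> block, a stub for later theming. A hypothetical fill-in (the .stChatMessage selector is an assumption; Streamlit's generated class names vary across versions):

def styles():
    # Hypothetical CSS; .stChatMessage is a version-dependent Streamlit class
    css = """
    <style>
    .stChatMessage { border-radius: 12px; padding: 0.75rem; }
    </style>
    """
    return css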