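"""Streamlit chatbot for the "AI Engineer Test" PDF.

Extracts the first pages of the PDF with pdfplumber, answers questions
about it with a Together-hosted Llama 3 model, and streams replies from
a remote LangServe RAG chain into a Streamlit chat UI.
"""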
import os

import pdfplumber
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_together import Together
from langserve import RemoteRunnable
from more_itertools import chunked
# Set the Together AI API key. Never commit a real key to source control;
# read it from the environment instead (placeholder shown here).
os.environ.setdefault('TOGETHER_API_KEY', '<YOUR_TOGETHER_API_KEY>')

# Extract text from the first `max_pages` pages of the source PDF.
text = ""
max_pages = 16
with pdfplumber.open("/content/AI Engineer Test.pdf") as pdf:
    for i, page in enumerate(pdf.pages):
        if i >= max_pages:
            break
        # extract_text() returns None for pages without a text layer.
        text += (page.extract_text() or "") + "\n"

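# Answer a question strictly from the extracted PDF text via a
# Together-hosted Llama 3 chat model.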
def Bot(Questions):
    chat_template = """
    Based on the provided context: {text}
    Please answer the following question: {Questions}
    Only provide answers that are directly related to the context. If the question is unrelated, respond with "I don't know".
    """
    prompt = PromptTemplate(
        input_variables=['text', 'Questions'],
        template=chat_template
    )
    llama3 = Together(model="meta-llama/Llama-3-70b-chat-hf", max_tokens=250)
    Generated_chat = LLMChain(llm=llama3, prompt=prompt)

    try:
        response = Generated_chat.invoke({
            "text": text,
            "Questions": Questions
        })

        response_text = response['text']

        # Strip the stray "assistant" role token the model sometimes emits.
        response_text = response_text.replace("assistant", "")

        # Post-processing: drop repeated words (case-insensitive) to curb
        # degenerate repetition; set.add() returns None, so the second
        # clause records each word while keeping its first occurrence.
        words = response_text.split()
        seen = set()
        filtered_words = [word for word in words if word.lower() not in seen and not seen.add(word.lower())]
        response_text = ' '.join(filtered_words)
        response_text = response_text.strip()  # Ensuring no extra spaces at the ends
        if not response_text.endswith('.'):
            response_text += '.'

        return response_text
    except Exception as e:
        return f"Error in generating response: {e}"

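# Wrap Bot() with a greeting shortcut. Note: the Streamlit UI below talks
# to the remote rag_chain rather than these local helpers.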
def ChatBot(Questions):
    greetings = ["hi", "hello", "hey", "greetings", "what's up", "howdy"]
    # Check whether the input is a greeting before hitting the model.
    question_lower = Questions.lower().strip()
    if question_lower in greetings or any(question_lower.startswith(greeting) for greeting in greetings):
        return "Hello! How can I assist you with the document today?"
    # Otherwise answer from the document, stripping newlines from the reply.
    response = Bot(Questions)
    return response.translate(str.maketrans('', '', '\n'))

# --- Page config and logo ---
st.set_page_config(
    page_title="AI Engineer Test Chatbot",
    page_icon="/content/Insight Therapy Solutions.png",
    layout="wide",
)
st.sidebar.image("/content/Insight Therapy Solutions.png", width=200)

st.sidebar.title("Navigation")
st.sidebar.write("Reclaim Your Mental Health")
st.sidebar.markdown("[Visit us](https://www.insighttherapysolutions.com/)")

# Remote LangServe RAG chain that produces the chat answers.
rag_chain = RemoteRunnable("http://69.61.24.171:8000/rag_chain/")

# Chat history persisted in Streamlit session state under this key.
msgs = StreamlitChatMessageHistory(key="langchain_messages")

# --- Main Content ---
st.markdown("## 🔍 Chatbot for the AI Engineer Test:")

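# Seed the conversation with a greeting on first load.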
if len(msgs.messages) == 0:
    msgs.add_ai_message("Hi! How can I assist you today?")


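# Replay the stored conversation so it survives Streamlit reruns.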
for msg in msgs.messages:
    st.chat_message(msg.type).write(msg.content)

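# Handle new user input: echo it, then stream the assistant's reply.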
if prompt := st.chat_input():
    st.chat_message("human").write(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        try:
            # Pair prior messages into (human, ai) tuples for the chain,
            # skipping the initial greeting and capping the history window.
            _chat_history = st.session_state.langchain_messages[1:40]
            _chat_history_transform = list(
                chunked([msg.content for msg in _chat_history], n=2)
            )

            response = rag_chain.stream(
                {"question": prompt, "chat_history": _chat_history_transform}
            )

            # Stream tokens with a cursor, then render the final answer once.
            for res in response:
                full_response += res or ""
                message_placeholder.markdown(full_response + "|")
            message_placeholder.markdown(full_response)

            msgs.add_user_message(prompt)
            msgs.add_ai_message(full_response)

        except Exception as e:
            st.error(f"An error occurred: {e}")