gabruarya committed on
Commit
988cf58
·
verified ·
1 Parent(s): 6a93e88

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +142 -0
app.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Literal
3
+ import streamlit as st
4
+ import os
5
+ # from llamaapi import LlamaAPI
6
+ # from langchain_experimental.llms import ChatLlamaAPI
7
+ from langchain.embeddings import HuggingFaceEmbeddings
8
+ from pinecone import Pinecone
9
+ from langchain.vectorstores import Pinecone
10
+ from langchain.prompts import PromptTemplate
11
+ from langchain.chains import RetrievalQA
12
+ import streamlit.components.v1 as components
13
+ from langchain_groq import ChatGroq
14
+ from langchain.chains import ConversationalRetrievalChain
15
+ from langchain.memory import ChatMessageHistory, ConversationBufferMemory
16
+ import time
17
+
18
+ HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
19
+
20
+ @dataclass
21
+ class Message:
22
+ """Class for keeping track of a chat message."""
23
+ origin: Literal["๐Ÿ‘ค Human", "๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai"]
24
+ message: str
25
+
26
+
27
+ def download_hugging_face_embeddings():
28
+ embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
29
+ return embeddings
30
+
31
+
32
+ def initialize_session_state():
33
+ if "history" not in st.session_state:
34
+ st.session_state.history = []
35
+ if "conversation" not in st.session_state:
36
+ # llama = LlamaAPI(st.secrets["LlamaAPI"])
37
+ # model = ChatLlamaAPI(client=llama)
38
+ chat = ChatGroq(temperature=0.5, groq_api_key=st.secrets["Groq_api"], model_name="mixtral-8x7b-32768")
39
+
40
+ embeddings = download_hugging_face_embeddings()
41
+
42
+ import os
43
+ os.environ['PINECONE_API_KEY'] = st.secrets["PINECONE_API_KEY"]
44
+ pc = Pinecone()
45
+ index_name = "medical-advisor" # put in the name of your pinecone index here
46
+
47
+ docsearch = Pinecone.from_existing_index(index_name, embeddings)
48
+
49
+ prompt_template = """
50
+ You are a trained bot to guide people about their medical concerns. You will answer user's query with your knowledge and the context provided.
51
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
52
+ Do not say thank you and tell you are an AI Assistant and be open about everything.
53
+ Use the following pieces of context to answer the users question.
54
+ Context: {context}
55
+ Question: {question}
56
+ Only return the helpful answer below and nothing else.
57
+ Helpful answer:
58
+ """
59
+
60
+ PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
61
+
62
+ #chain_type_kwargs = {"prompt": PROMPT}
63
+ message_history = ChatMessageHistory()
64
+ memory = ConversationBufferMemory(
65
+ memory_key="chat_history",
66
+ output_key="answer",
67
+ chat_memory=message_history,
68
+ return_messages=True,
69
+ )
70
+ retrieval_chain = ConversationalRetrievalChain.from_llm(llm=chat,
71
+ chain_type="stuff",
72
+ retriever=docsearch.as_retriever(
73
+ search_kwargs={'k': 2}),
74
+ return_source_documents=True,
75
+ combine_docs_chain_kwargs={"prompt": PROMPT},
76
+ memory= memory
77
+ )
78
+
79
+ st.session_state.conversation = retrieval_chain
80
+
81
+
82
+ def on_click_callback():
83
+ human_prompt = st.session_state.human_prompt
84
+ st.session_state.human_prompt=""
85
+ response = st.session_state.conversation(
86
+ human_prompt
87
+ )
88
+ llm_response = response['answer']
89
+ st.session_state.history.append(
90
+ Message("๐Ÿ‘ค Human", human_prompt)
91
+ )
92
+ st.session_state.history.append(
93
+ Message("๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai", llm_response)
94
+ )
95
+
96
+
97
+ initialize_session_state()
98
+
99
+ st.title("LegalEase Advisor Chatbot ๐Ÿ‡ฎ๐Ÿ‡ณ")
100
+
101
+ # st.markdown(
102
+ # """
103
+ # ๐Ÿ‘‹ **Namaste! Welcome to LegalEase Advisor!**
104
+ # I'm here to assist you with your legal queries within the framework of Indian law. Whether you're navigating through specific legal issues or seeking general advice, I'm here to help.
105
+
106
+ # ๐Ÿ“š **How I Can Assist:**
107
+
108
+ # - Answer questions on various aspects of Indian law.
109
+ # - Guide you through legal processes relevant to India.
110
+ # - Provide information on your rights and responsibilities as per Indian legal standards.
111
+
112
+ # โš–๏ธ **Disclaimer:**
113
+
114
+ # While I can provide general information, it's essential to consult with a qualified Indian attorney for advice tailored to your specific situation.
115
+
116
+ # ๐Ÿค– **Getting Started:**
117
+
118
+ # Feel free to ask any legal question related to Indian law, using keywords like "property rights," "labor laws," or "family law." I'm here to assist you!
119
+ # Let's get started! How can I assist you today?
120
+ # """
121
+ # )
122
+
123
+ chat_placeholder = st.container()
124
+ prompt_placeholder = st.form("chat-form")
125
+
126
+ with chat_placeholder:
127
+ for chat in st.session_state.history:
128
+ st.markdown(f"{chat.origin} : {chat.message}")
129
+
130
+ with prompt_placeholder:
131
+ st.markdown("**Chat**")
132
+ cols = st.columns((6, 1))
133
+ cols[0].text_input(
134
+ "Chat",
135
+ label_visibility="collapsed",
136
+ key="human_prompt",
137
+ )
138
+ cols[1].form_submit_button(
139
+ "Submit",
140
+ type="primary",
141
+ on_click=on_click_callback,
142
+ )