Ahtisham1583 committed on
Commit
31897ad
·
verified ·
1 Parent(s): d5c49ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +141 -60
app.py CHANGED
@@ -1,63 +1,144 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  )
60
 
 
 
 
 
 
 
61
 
62
- if __name__ == "__main__":
63
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Literal
3
+ import streamlit as st
4
+ import os
5
+ from llamaapi import LlamaAPI
6
+ from langchain_experimental.llms import ChatLlamaAPI
7
+ from langchain.embeddings import HuggingFaceEmbeddings
8
+ import pinecone
9
+ from langchain.vectorstores import Pinecone
10
+ from langchain.prompts import PromptTemplate
11
+ from langchain.chains import RetrievalQA
12
+ import streamlit.components.v1 as components
13
+ from langchain_groq import ChatGroq
14
+ from langchain.chains import ConversationalRetrievalChain
15
+ from langchain.memory import ChatMessageHistory, ConversationBufferMemory
16
+ import time
17
+
18
+ HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
19
+
20
+ @dataclass
21
+ class Message:
22
+ """Class for keeping track of a chat message."""
23
+ origin: Literal["๐Ÿ‘ค Human", "๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai"]
24
+ message: str
25
+
26
+
27
+ def download_hugging_face_embeddings():
28
+ embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
29
+ return embeddings
30
+
31
+
32
+ def initialize_session_state():
33
+ if "history" not in st.session_state:
34
+ st.session_state.history = []
35
+ if "conversation" not in st.session_state:
36
+ llama = LlamaAPI(st.secrets["LlamaAPI"])
37
+ model = ChatLlamaAPI(client=llama)
38
+ chat = ChatGroq(temperature=0.5, groq_api_key=st.secrets["Groq_api"], model_name="mixtral-8x7b-32768")
39
+
40
+ embeddings = download_hugging_face_embeddings()
41
+
42
+ # Initializing the Pinecone
43
+ pinecone.init(
44
+ api_key=st.secrets["PINECONE_API_KEY"], # find at app.pinecone.io
45
+ environment=st.secrets["PINECONE_API_ENV"] # next to api key in console
46
+ )
47
+ index_name = "legal-advisor" # put in the name of your pinecone index here
48
+
49
+ docsearch = Pinecone.from_existing_index(index_name, embeddings)
50
+
51
+ prompt_template = """
52
+ You are a trained bot to guide people about Indian Law. You will answer user's query with your knowledge and the context provided.
53
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
54
+ Do not say thank you and tell you are an AI Assistant and be open about everything.
55
+ Use the following pieces of context to answer the users question.
56
+ Context: {context}
57
+ Question: {question}
58
+ Only return the helpful answer below and nothing else.
59
+ Helpful answer:
60
+ """
61
+
62
+ PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
63
+
64
+ #chain_type_kwargs = {"prompt": PROMPT}
65
+ message_history = ChatMessageHistory()
66
+ memory = ConversationBufferMemory(
67
+ memory_key="chat_history",
68
+ output_key="answer",
69
+ chat_memory=message_history,
70
+ return_messages=True,
71
+ )
72
+ retrieval_chain = ConversationalRetrievalChain.from_llm(llm=chat,
73
+ chain_type="stuff",
74
+ retriever=docsearch.as_retriever(
75
+ search_kwargs={'k': 2}),
76
+ return_source_documents=True,
77
+ combine_docs_chain_kwargs={"prompt": PROMPT},
78
+ memory= memory
79
+ )
80
+
81
+ st.session_state.conversation = retrieval_chain
82
+
83
+
84
+ def on_click_callback():
85
+ human_prompt = st.session_state.human_prompt
86
+ st.session_state.human_prompt=""
87
+ response = st.session_state.conversation(
88
+ human_prompt
89
+ )
90
+ llm_response = response['answer']
91
+ st.session_state.history.append(
92
+ Message("๐Ÿ‘ค Human", human_prompt)
93
+ )
94
+ st.session_state.history.append(
95
+ Message("๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai", llm_response)
96
+ )
97
+
98
+
99
+ initialize_session_state()
100
+
101
+ st.title("LegalEase Advisor Chatbot ๐Ÿ‡ฎ๐Ÿ‡ณ")
102
+
103
+ st.markdown(
104
+ """
105
+ ๐Ÿ‘‹ **Namaste! Welcome to LegalEase Advisor!**
106
+ I'm here to assist you with your legal queries within the framework of Indian law. Whether you're navigating through specific legal issues or seeking general advice, I'm here to help.
107
+
108
+ ๐Ÿ“š **How I Can Assist:**
109
+
110
+ - Answer questions on various aspects of Indian law.
111
+ - Guide you through legal processes relevant to India.
112
+ - Provide information on your rights and responsibilities as per Indian legal standards.
113
+
114
+ โš–๏ธ **Disclaimer:**
115
+
116
+ While I can provide general information, it's essential to consult with a qualified Indian attorney for advice tailored to your specific situation.
117
+
118
+ ๐Ÿค– **Getting Started:**
119
+
120
+ Feel free to ask any legal question related to Indian law, using keywords like "property rights," "labor laws," or "family law." I'm here to assist you!
121
+ Let's get started! How can I assist you today?
122
+ """
123
  )
124
 
125
+ chat_placeholder = st.container()
126
+ prompt_placeholder = st.form("chat-form")
127
+
128
+ with chat_placeholder:
129
+ for chat in st.session_state.history:
130
+ st.markdown(f"{chat.origin} : {chat.message}")
131
 
132
+ with prompt_placeholder:
133
+ st.markdown("**Chat**")
134
+ cols = st.columns((6, 1))
135
+ cols[0].text_input(
136
+ "Chat",
137
+ label_visibility="collapsed",
138
+ key="human_prompt",
139
+ )
140
+ cols[1].form_submit_button(
141
+ "Submit",
142
+ type="primary",
143
+ on_click=on_click_callback,
144
+ )