Kuldip2411 committed
Commit 4019b2f · verified
Parent: f8817e5

Update app.py

Files changed (1): app.py (+107 -105)
app.py CHANGED
@@ -1,106 +1,108 @@
-import os
-import base64
-from io import BytesIO
-from PIL import Image
-import streamlit as st
-from app_config import SYSTEM_PROMPT,MODEL,MAX_TOKENS,TRANSFORMER_MODEL
-from langchain.memory import ConversationSummaryBufferMemory
-from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_groq import ChatGroq
-from streamlit_pdf_viewer import pdf_viewer
-from pydantic import BaseModel
-from langchain.chains import LLMChain
-from langchain.prompts import ChatPromptTemplate
-from langchain_community.vectorstores import FAISS
-from sentence_transformers import SentenceTransformer
-from typing import Any
-
-st.title("Hitachi Support Bot")
-
-class Element(BaseModel):
-    type: str
-    text: Any
-
-# llm = ChatGoogleGenerativeAI(
-#     model=MODEL,
-#     max_tokens=MAX_TOKENS
-# )
-llm = ChatGroq(model=MODEL,api_key='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')
-
-
-prompt = ChatPromptTemplate.from_template(SYSTEM_PROMPT)
-qa_chain = LLMChain(llm=llm,prompt=prompt)
-embeddings = HuggingFaceEmbeddings(model_name=TRANSFORMER_MODEL)
-db = FAISS.load_local("faiss_index",embeddings,allow_dangerous_deserialization=True)
-
-st.markdown(
-    """
-    <style>
-    .st-emotion-cache-janbn0 {
-        flex-direction: row-reverse;
-        text-align: right;
-    }
-    </style>
-    """,
-    unsafe_allow_html=True,
-)
-
-def response_generator(question):
-    relevant_docs = db.similarity_search_with_relevance_scores(question,k=5)
-    context = ""
-    relevant_images = []
-    for d,score in relevant_docs:
-        if score > 0:
-            if d.metadata['type'] == 'text':
-                context += str(d.metadata['original_content'])
-            elif d.metadata['type'] == 'table':
-                context += str(d.metadata['original_content'])
-            elif d.metadata['type'] == 'image':
-                context += d.page_content
-                relevant_images.append(d.metadata['original_content'])
-    result = qa_chain.run({'context':context,"question":question})
-    return result,relevant_images
-
-with st.sidebar:
-    st.header("Hitachi Support Bot")
-    button = st.toggle("View Doc file.")
-
-if button:
-    pdf_viewer("GPT OUTPUT.pdf")
-else:
-    if "messages" not in st.session_state:
-        st.session_state.messages=[{"role": "system", "content": SYSTEM_PROMPT}]
-
-    if "llm" not in st.session_state:
-        st.session_state.llm = llm
-    if "rag_memory" not in st.session_state:
-        st.session_state.rag_memory = ConversationSummaryBufferMemory(llm=st.session_state.llm, max_token_limit= 5000)
-
-    container = st.container(height=700)
-    for message in st.session_state.messages:
-        if message["role"] != "system":
-            if message["role"] == "user":
-                with container.chat_message(message["role"]):
-                    st.write(message["content"])
-            if message["role"] == "assistant":
-                with container.chat_message(message["role"]):
-                    st.write(message["content"])
-                    for i in range(len(message["images"])):
-                        st.image(Image.open(BytesIO(base64.b64decode(message["images"][i].encode('utf-8')))))
-
-    if prompt := st.chat_input("Enter your query here... "):
-        with container.chat_message("user"):
-            st.write(prompt)
-        st.session_state.messages.append({"role":"user" , "content":prompt})
-        with container.chat_message("assistant"):
-            response,images = response_generator(prompt)
-            st.write(response)
-            for i in range(len(images)):
-                st.markdown("""---""")
-                st.image(Image.open(BytesIO(base64.b64decode(images[i].encode('utf-8')))))
-                st.markdown("""---""")
-
-
-        st.session_state.rag_memory.save_context({'input': prompt}, {'output': response})
+import os
+import base64
+from io import BytesIO
+from PIL import Image
+import streamlit as st
+from app_config import SYSTEM_PROMPT,MODEL,MAX_TOKENS,TRANSFORMER_MODEL
+from langchain.memory import ConversationSummaryBufferMemory
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_groq import ChatGroq
+from streamlit_pdf_viewer import pdf_viewer
+from pydantic import BaseModel
+from langchain.chains import LLMChain
+from langchain.prompts import ChatPromptTemplate
+from langchain_community.vectorstores import FAISS
+from sentence_transformers import SentenceTransformer
+from typing import Any
+
+st.title("Hitachi Support Bot")
+
+class Element(BaseModel):
+    type: str
+    text: Any
+
+# llm = ChatGoogleGenerativeAI(
+#     model=MODEL,
+#     max_tokens=MAX_TOKENS
+# )
+# llm = ChatGroq(model=MODEL,api_key='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')
+llm = ChatGroq(model=MODEL,api_key=os.getenv('API_KEY'))
+
+
+
+prompt = ChatPromptTemplate.from_template(SYSTEM_PROMPT)
+qa_chain = LLMChain(llm=llm,prompt=prompt)
+embeddings = HuggingFaceEmbeddings(model_name=TRANSFORMER_MODEL)
+db = FAISS.load_local("faiss_index",embeddings,allow_dangerous_deserialization=True)
+
+st.markdown(
+    """
+    <style>
+    .st-emotion-cache-janbn0 {
+        flex-direction: row-reverse;
+        text-align: right;
+    }
+    </style>
+    """,
+    unsafe_allow_html=True,
+)
+
+def response_generator(question):
+    relevant_docs = db.similarity_search_with_relevance_scores(question,k=5)
+    context = ""
+    relevant_images = []
+    for d,score in relevant_docs:
+        if score > 0:
+            if d.metadata['type'] == 'text':
+                context += str(d.metadata['original_content'])
+            elif d.metadata['type'] == 'table':
+                context += str(d.metadata['original_content'])
+            elif d.metadata['type'] == 'image':
+                context += d.page_content
+                relevant_images.append(d.metadata['original_content'])
+    result = qa_chain.run({'context':context,"question":question})
+    return result,relevant_images
+
+with st.sidebar:
+    st.header("Hitachi Support Bot")
+    button = st.toggle("View Doc file.")
+
+if button:
+    pdf_viewer("GPT OUTPUT.pdf")
+else:
+    if "messages" not in st.session_state:
+        st.session_state.messages=[{"role": "system", "content": SYSTEM_PROMPT}]
+
+    if "llm" not in st.session_state:
+        st.session_state.llm = llm
+    if "rag_memory" not in st.session_state:
+        st.session_state.rag_memory = ConversationSummaryBufferMemory(llm=st.session_state.llm, max_token_limit= 5000)
+
+    container = st.container(height=700)
+    for message in st.session_state.messages:
+        if message["role"] != "system":
+            if message["role"] == "user":
+                with container.chat_message(message["role"]):
+                    st.write(message["content"])
+            if message["role"] == "assistant":
+                with container.chat_message(message["role"]):
+                    st.write(message["content"])
+                    for i in range(len(message["images"])):
+                        st.image(Image.open(BytesIO(base64.b64decode(message["images"][i].encode('utf-8')))))
+
+    if prompt := st.chat_input("Enter your query here... "):
+        with container.chat_message("user"):
+            st.write(prompt)
+        st.session_state.messages.append({"role":"user" , "content":prompt})
+        with container.chat_message("assistant"):
+            response,images = response_generator(prompt)
+            st.write(response)
+            for i in range(len(images)):
+                st.markdown("""---""")
+                st.image(Image.open(BytesIO(base64.b64decode(images[i].encode('utf-8')))))
+                st.markdown("""---""")
+
+
+        st.session_state.rag_memory.save_context({'input': prompt}, {'output': response})
         st.session_state.messages.append({"role":"assistant" , "content":response,'images':images})
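
The one functional change in this commit is replacing the hardcoded Groq API key with os.getenv('API_KEY'), which keeps the secret out of the source file; note the old key is still visible in this diff and in the repository history, so it would also need to be revoked on the Groq side. Below is a minimal sketch, not part of the commit, of how the app would consume that secret with a fail-fast check instead of silently passing api_key=None to ChatGroq. It assumes API_KEY is provided via a Hugging Face Space secret or an exported shell variable, and reuses the app's own app_config module:

    import os

    from langchain_groq import ChatGroq
    from app_config import MODEL  # same config module the app already imports

    # Read the secret from the environment (on Spaces it would be configured as a
    # repository secret; locally e.g. `export API_KEY=...`). os.getenv returns
    # None when the variable is unset.
    api_key = os.getenv("API_KEY")
    if not api_key:
        # Fail fast with a clear message rather than erroring on the first request.
        raise RuntimeError("API_KEY is not set; configure it as a Space secret or environment variable.")

    llm = ChatGroq(model=MODEL, api_key=api_key)

Reading the key from the environment at startup means the same code runs unchanged across local, staging, and Space deployments, with each environment supplying its own credential.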