File size: 2,819 Bytes
24fd138 f4a4193 2f43d34 24fd138 2d78a4c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 |
import streamlit as st
import random
import time
import os
from langchain_together import ChatTogether
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_together import TogetherEmbeddings
os.environ["TOGETHER_API_KEY"] = "6216ce36aadcb06c35436e7d6bbbc18b354d8140f6e805db485d70ecff4481d0"
#load
loader = TextLoader("Resume_data.txt")
documents = loader.load()
# split it into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vectorstore = FAISS.from_documents(docs,
TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
)
retriever = vectorstore.as_retriever()
print("assigning model")
model = ChatTogether(
model="meta-llama/Llama-3-70b-chat-hf",
temperature=0.0,
max_tokens=500,)
# template = """<s>[INST] answer from context only as if person is responding (use i instead of you in response). and always answer in short answer.
# answer for asked question only, if he greets greet back.
template = """
{context}
Question: {question} [/INST]
"""
prompt = ChatPromptTemplate.from_template(template)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
st.title("Simple chat")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What is up?"):
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
############################################
# Streamed response emulator
def response_generator():
query = f"{prompt}"
if query != "None":
for m in chain.stream(query):
print(m)
yield m + " "
else:
yield "How can i help you?"
# time.sleep(0.05)
# return chain.invoke(query)
###########################################
# Display assistant response in chat message container
with st.chat_message("assistant"):
response = st.write_stream(response_generator())
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": response}) |