import streamlit as st
import time
import os
from langchain_together import ChatTogether, TogetherEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Read the Together API key from the API_TOKEN environment variable
# (typically loaded from a .env file) and expose it under the name
# the langchain_together clients expect. Guarding against a missing
# token avoids a TypeError from os.environ assignment.
api_token = os.getenv("API_TOKEN")
if api_token:
    os.environ["TOGETHER_API_KEY"] = api_token

# Load the source document that backs the RAG application
loader = TextLoader("Resume_data.txt")
documents = loader.load()

# Split the document into chunks and index them in a FAISS vector store
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vectorstore = FAISS.from_documents(
    docs,
    TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval"),
)
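# Note: this index is rebuilt from scratch on every Streamlit rerun. A minimal
# sketch of caching it instead (assumes st.cache_resource, available in recent
# Streamlit releases; not part of the original code):
#
# @st.cache_resource
# def build_vectorstore():
#     chunks = text_splitter.split_documents(TextLoader("Resume_data.txt").load())
#     return FAISS.from_documents(
#         chunks,
#         TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval"),
#     )
#
# vectorstore = build_vectorstore()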

retriever = vectorstore.as_retriever()
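# as_retriever() defaults to plain similarity search (top-k; k=4 in current
# LangChain releases). A hedged example of tuning it, not in the original:
# retriever = vectorstore.as_retriever(search_kwargs={"k": 3})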
model = ChatTogether(
    # model="meta-llama/Llama-3-70b-chat-hf",
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
    temperature=0.0,
    max_tokens=500,
)

prompt = ChatPromptTemplate.from_messages([
    ("system",
     "You are an assistant for question-answering tasks. "
     "Use the following pieces of retrieved context to answer the question. "
     "If you don't know the answer, just say that you don't know. "
     "Answer as a person would, and if the user greets you, greet them back."),
    ("user", "Context: {context}\nQuestion: {question}"),
])

# RAG chain: retrieve context for the question, fill the prompt,
# call the model, and parse the reply into a plain string
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
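# Example usage (the question here is a hypothetical, not from the original
# code): chain.invoke("What skills does the resume list?") returns the full
# answer as one string, while chain.stream(...) yields it in chunks, as below.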


st.title("Chat with me")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input; chat_input returns None until the user submits a message
if user_input := st.chat_input("What is up?"):
    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(user_input)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Stream the chain's answer chunk by chunk, with a small delay for a
    # typewriter effect
    def response_generator():
        for chunk in chain.stream(user_input):
            yield chunk
            time.sleep(0.02)

    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator())
    # Add the assistant response to the chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
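# To launch (assumption: this file is saved as app.py and API_TOKEN is set in
# the environment or a .env file):
#   streamlit run app.py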