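"""Streamlit chat app demonstrating LangChain's ConversationSummaryMemory.

A separate summarization LLM maintains a running summary of the chat, which
is displayed below the conversation. Launch with: streamlit run <script>.py
"""
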
import os

import streamlit as st
from dotenv import load_dotenv

from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryMemory
from langchain_core.messages import HumanMessage, AIMessage
from langchain_openai import ChatOpenAI

# Load the OpenAI API key from a local .env file, if one is present.
# load_dotenv() returns False rather than raising when the file is missing,
# so check its return value instead of wrapping it in a try/except.
if load_dotenv('C:\\Users\\raj\\.jupyter\\.env'):
    if os.getenv('OPENAI_API_KEY'):
        st.session_state['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
else:
    print("Environment file not found!! Copy & paste your OpenAI API key.")

st.title("LangChain ConversationSummaryMemory !!!")

if 'OPENAI_API_KEY' in st.session_state:
    openai_api_key = st.sidebar.text_input('OpenAI API key', value=st.session_state['OPENAI_API_KEY'])
else:
    openai_api_key = st.sidebar.text_input('OpenAI API key', placeholder='copy & paste your OpenAI API key')


@st.cache_resource
def get_summarization_llm():
    # LLM used by ConversationSummaryMemory to condense the chat history.
    model = 'gpt-3.5-turbo-0125'
    return ChatOpenAI(model=model, openai_api_key=openai_api_key)


@st.cache_resource
def get_llm():
    # LLM used to answer the user's questions in the conversation.
    model = 'gpt-3.5-turbo-0125'
    return ChatOpenAI(model=model, openai_api_key=openai_api_key)
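
# NOTE: the two factories above build the same model today; keeping them
# separate makes it easy to use a cheaper model for summarization than for chat.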


def get_llm_chain():
    # Not cached with st.cache_resource: that cache is shared across all
    # sessions, so a cached chain would pin every user to the first session's
    # memory. The chain is cheap to build; the LLM clients above are cached.
    memory = st.session_state['MEMORY']
    conversation = ConversationChain(
        llm=get_llm(),
        memory=memory
    )
    return conversation


def get_chat_context():
    # Return the running summary maintained by ConversationSummaryMemory.
    memory = st.session_state['MEMORY']
    return memory.buffer


def get_llm_response(prompt):
    chain = get_llm_chain()

    with st.spinner('Invoking LLM ... '):
        # The chain's memory injects the running summary into the prompt's
        # history slot, so only the new question needs to be sent here;
        # prepending the summary manually would duplicate it.
        response = chain.invoke({'input': prompt})

    return response


if 'MEMORY' not in st.session_state:
    memory = ConversationSummaryMemory(
        llm=get_summarization_llm(),
        human_prefix='user',
        ai_prefix='assistant',
        # Keep the summary as a plain string so it renders cleanly inside
        # ConversationChain's default (string-based) prompt template.
        return_messages=False
    )

    st.session_state['MEMORY'] = memory
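
# Streamlit reruns this script top-to-bottom on every interaction, so the
# conversation so far (stored verbatim in chat_memory alongside the running
# summary) is replayed into the chat window on each run.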
for msg in st.session_state['MEMORY'].chat_memory.messages:
    if isinstance(msg, HumanMessage):
        st.chat_message('user').write(msg.content)
    elif isinstance(msg, AIMessage):
        st.chat_message('ai').write(msg.content)
    else:
        print('System message: ', msg.content)

prompt = st.chat_input(placeholder='Your input here')
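
# A new turn: echo the user's message, ask the chain, then show the reply.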
if prompt:
    st.chat_message('user').write(prompt)

    response = get_llm_response(prompt)

    # ConversationChain.invoke returns a dict; the reply is under 'response'.
    st.chat_message('ai').write(response['response'])

st.divider()
st.subheader('Context/Summary:')
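
# Display the running summary the summarization LLM has produced so far.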
st.write(get_chat_context())