Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -28,6 +28,15 @@ from langchain_pinecone import PineconeVectorStore
 from langchain.chains import RetrievalQA
 import asyncio
 
+from langchain.globals import set_llm_cache
+from langchain_openai import OpenAI
+from langchain.cache import SQLiteCache
+
+
+# Set up SQLite-based caching
+set_llm_cache(SQLiteCache())
+
+
 embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 def initialize_gpt_model():
     return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
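These imports enable LangChain's global LLM cache backed by SQLite, so a repeated identical prompt is answered from disk instead of a fresh OpenAI call. Two version-sensitive details worth flagging: SQLiteCache() with no arguments writes to .langchain.db in the working directory, and on newer LangChain releases the class lives in langchain_community.cache, so the langchain.cache import can raise at startup. A minimal sketch of the mechanism, independent of this app (the prompt string is illustrative and an OPENAI_API_KEY is assumed in the environment):

# Minimal caching sketch, not part of the commit. Import paths assume a
# recent LangChain; older releases expose SQLiteCache under langchain.cache.
import time
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from langchain_openai import ChatOpenAI

set_llm_cache(SQLiteCache(database_path=".langchain.db"))  # default path, shown explicitly

llm = ChatOpenAI(model="gpt-4o", temperature=0)

start = time.time()
llm.invoke("Name one country music subgenre.")  # first call hits the API and is stored
print(f"uncached: {time.time() - start:.2f}s")

start = time.time()
llm.invoke("Name one country music subgenre.")  # identical prompt is served from SQLite
print(f"cached: {time.time() - start:.2f}s")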
@@ -52,7 +61,16 @@ index_name="radarclintcountrymusic11152024"
 vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
 retriever = vectorstore.as_retriever(search_kwargs={'k': 2})
 
-chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
+#chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
+
+# Initialize a slower LLM model with caching
+chat_model = OpenAI(
+    api_key=os.environ['OPENAI_API_KEY'],
+    model_name="gpt-4o",
+    n=2,
+    best_of=2,
+    temperature=0
+)
 
 #code for history
 conversational_memory = ConversationBufferWindowMemory(
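This swap is a plausible source of the Space's runtime error: langchain_openai.OpenAI targets the legacy text-completions endpoint, and n/best_of are completions-only parameters (the pattern mirrors the LangChain caching docs, which pair them with a completions model such as gpt-3.5-turbo-instruct), but gpt-4o is only served through the chat completions API. A hedged alternative that keeps caching, since the global set_llm_cache applies to chat models as well:

# Hypothetical fix, not part of the commit: keep the chat wrapper so gpt-4o
# is reached via the chat completions endpoint. The global SQLite cache set
# up above also covers ChatOpenAI calls; the legacy n/best_of parameters are
# dropped because the chat endpoint does not accept best_of.
from langchain_openai import ChatOpenAI

chat_model = ChatOpenAI(
    api_key=os.environ['OPENAI_API_KEY'],
    model='gpt-4o',
    temperature=0,
)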
@@ -103,17 +121,30 @@ Helpful Answer:"""
 
 QA_CHAIN_PROMPT= PromptTemplate(input_variables=["context", "question"], template=template)
 
-def build_qa_chain(prompt_template):
+#def build_qa_chain(prompt_template):
+#    qa_chain = RetrievalQA.from_chain_type(
+#        llm=chat_model,
+#        chain_type="stuff",
+#        retriever=retriever,
+#        chain_type_kwargs={"prompt": prompt_template}
+#    )
+#    return qa_chain  # Return the qa_chain object
+
+# Function to initialize QA Chain with caching enabled
+def build_qa_chain_with_cache(prompt_template):
     qa_chain = RetrievalQA.from_chain_type(
         llm=chat_model,
         chain_type="stuff",
         retriever=retriever,
         chain_type_kwargs={"prompt": prompt_template}
     )
-    return qa_chain
+    return qa_chain
 
 # Instantiate the QA Chain using the defined prompt template
-qa_chain = build_qa_chain(QA_CHAIN_PROMPT)
+#qa_chain = build_qa_chain(QA_CHAIN_PROMPT)
+
+# Instantiate the QA Chain using the cached LLM
+qa_chain = build_qa_chain_with_cache(QA_CHAIN_PROMPT)
 
 # Define the function to clear input and output
 def clear_fields():
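Note that the cache sits at the LLM layer, not around the whole chain: every query still runs the Pinecone retrieval, and a cached answer is reused only when the assembled prompt (question plus retrieved context) exactly matches a previous one. An illustrative invocation, with a hypothetical question:

# Illustrative only -- the question string is hypothetical. RetrievalQA takes
# its input under the "query" key and returns the answer under "result".
result = qa_chain.invoke({"query": "Which country artists does the site cover?"})
print(result["result"])

# Asking the same question again re-runs retrieval, but with a deterministic
# retriever the assembled prompt is identical, so the generation step is
# answered from the SQLite cache.
result = qa_chain.invoke({"query": "Which country artists does the site cover?"})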