Pijush2023 committed
Commit 711b5e7 · verified · 1 Parent(s): f8551f9

Update app.py

Files changed (1)
  1. app.py +8 -28
app.py CHANGED
@@ -30,11 +30,10 @@ import asyncio
 
 from langchain.globals import set_llm_cache
 from langchain_openai import OpenAI
-from langchain.cache import SQLiteCache
 
 
-# Set up SQLite-based caching
-set_llm_cache(SQLiteCache())
+
+
 
 
 embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
@@ -61,16 +60,7 @@ index_name="radarclintcountrymusic11152024"
 vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
 retriever = vectorstore.as_retriever(search_kwargs={'k': 2})
 
-#chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
-
-# Initialize a slower LLM model with caching
-chat_model = OpenAI(
-    api_key=os.environ['OPENAI_API_KEY'],
-    model_name="gpt-4o",
-    n=2,
-    best_of=2,
-    temperature=0
-)
+chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
 
 #code for history
 conversational_memory = ConversationBufferWindowMemory(
@@ -121,30 +111,20 @@ Helpful Answer:"""
 
 QA_CHAIN_PROMPT= PromptTemplate(input_variables=["context", "question"], template=template)
 
-#def build_qa_chain(prompt_template):
-    #qa_chain = RetrievalQA.from_chain_type(
-        #llm=chat_model,
-        #chain_type="stuff",
-        #retriever=retriever,
-        #chain_type_kwargs={"prompt": prompt_template}
-    #)
-    #return qa_chain # Return the qa_chain object
-
-# Function to initialize QA Chain with caching enabled
-def build_qa_chain_with_cache(prompt_template):
+def build_qa_chain(prompt_template):
     qa_chain = RetrievalQA.from_chain_type(
         llm=chat_model,
         chain_type="stuff",
         retriever=retriever,
         chain_type_kwargs={"prompt": prompt_template}
    )
-    return qa_chain
+    return qa_chain # Return the qa_chain object
+
 
 # Instantiate the QA Chain using the defined prompt template
-#qa_chain = build_qa_chain(QA_CHAIN_PROMPT)
+qa_chain = build_qa_chain(QA_CHAIN_PROMPT)
+
 
 
-# Instantiate the QA Chain using the cached LLM
-qa_chain = build_qa_chain_with_cache(QA_CHAIN_PROMPT)
 # Define the function to clear input and output
 def clear_fields():
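
The lines removed in the first and second hunks were the only wiring for LangChain's SQLite-backed response cache, and the commit reverts from the cached OpenAI completion model back to the plain ChatOpenAI chat model. For reference, a minimal sketch of the caching pattern that was dropped; the langchain_community import path and the .langchain.db filename are assumptions rather than code from this repository:

# Sketch: SQLite-backed LLM caching as wired before this commit (assumed imports).
# Recent LangChain releases expose SQLiteCache from langchain_community.cache;
# the removed code imported it from the older langchain.cache path.
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache

# Cache identical LLM calls in a local SQLite file so a repeated prompt is
# answered from disk instead of triggering another OpenAI API call.
set_llm_cache(SQLiteCache(database_path=".langchain.db"))

With the cache removed, every identical question now results in a fresh API call rather than a cached response.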
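
Below is a self-contained sketch of how the pieces left in app.py fit together after this commit. The index name, retriever settings, model, and build_qa_chain come from the diff; the imports, the abbreviated prompt template, and the example query are assumptions based on standard LangChain usage, and a Pinecone API key is expected in the environment:

# Sketch of the retrieval-QA wiring as it stands after this commit.
# Assumes OPENAI_API_KEY and PINECONE_API_KEY are set in the environment.
import os
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])

index_name = "radarclintcountrymusic11152024"
vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={'k': 2})

# The plain (uncached) chat model that this commit restores.
chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')

# Stand-in for the full template defined earlier in app.py.
template = """Use the context to answer the question.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)

def build_qa_chain(prompt_template):
    # "stuff" concatenates the k=2 retrieved documents into the prompt's {context}.
    return RetrievalQA.from_chain_type(
        llm=chat_model,
        chain_type="stuff",
        retriever=retriever,
        chain_type_kwargs={"prompt": prompt_template},
    )

qa_chain = build_qa_chain(QA_CHAIN_PROMPT)

# RetrievalQA expects the input under the "query" key and returns "result".
answer = qa_chain.invoke({"query": "Example question about the indexed content"})["result"]
print(answer)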