# cicero-semantic-search: a Gradio front end over a FAISS semantic index.
import gradio as gr

from langchain.chains import LLMChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain_community.vectorstores.faiss import FAISS
# The embedding model must match the one used when the index was built.
embeddings = OpenAIEmbeddings()

# Load the prebuilt FAISS index from disk. allow_dangerous_deserialization is
# required because FAISS indexes are stored with pickle; only enable it for
# index files you trust.
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
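
# A minimal sketch of how "faiss_index" could be rebuilt if it is missing.
# "articles.txt" is a hypothetical source file, not part of this Space:
#
#   from langchain_community.document_loaders import TextLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#   chunks = RecursiveCharacterTextSplitter(
#       chunk_size=1000, chunk_overlap=100
#   ).split_documents(TextLoader("articles.txt").load())
#   FAISS.from_documents(chunks, embeddings).save_local("faiss_index")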
# -----------------------------------------------------------------------------
def get_response_from_query(db, query, k=3):
    # Retrieve the k most similar chunks and join them into one context string.
    docs = db.similarity_search(query, k=k)
    docs_page_content = " ".join([d.page_content for d in docs])

    llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)

    prompt = PromptTemplate(
        input_variables=["question", "docs"],
        template="""
        You are a bot that is open to discussions about different cultural, philosophical,
        and political topics. Run different analyses on the articles provided. Stay truthful,
        and if no resources were provided, give only your opinion.
        Answer the following question: {question}
        By searching the following articles: {docs}
        Only use the factual information from the documents. Make sure to mention key phrases from the articles.
        If you feel like you don't have enough information to answer the question, say "I don't know".
        """,
    )

    chain = LLMChain(llm=llm, prompt=prompt)
    # LLMChain.run only accepts the prompt's input variables, so
    # return_source_documents is not a valid argument here; the retrieved
    # documents are returned separately below instead.
    response = chain.run(question=query, docs=docs_page_content)
    r_text = str(response)
    # Evaluation step: ask the model whether its own answer is faithful to the sources.
    prompt_eval = PromptTemplate(
        input_variables=["answer", "docs"],
        template="""
        Your job is to evaluate whether the response to a given context is faithful.
        For the following: {answer}
        By searching the following article: {docs}
        Give a reason why they are similar or not, starting with a Yes or a No.
        """,
    )
    chain_part_2 = LLMChain(llm=llm, prompt=prompt_eval)
    evals = chain_part_2.run(answer=r_text, docs=docs_page_content)

    return response, docs, evals
def greet(query):
    answer, sources, evals = get_response_from_query(db, query, k=2)
    # Render the source documents as readable text rather than their repr.
    sources_text = "\n\n".join(d.page_content for d in sources)
    return answer, sources_text, evals
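
# Example of calling the pipeline directly (hypothetical query):
#   answer, docs, evals = get_response_from_query(db, "What does Cicero say about friendship?", k=3)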
demo = gr.Interface(
    fn=greet,
    title="cicero-semantic-search",
    inputs="text",
    outputs=[
        gr.components.Textbox(lines=3, label="Response"),
        gr.components.Textbox(lines=3, label="Source"),
        gr.components.Textbox(lines=3, label="Evaluation"),
    ],
)
# Note: share=True has no effect when running on Hugging Face Spaces; it only
# matters for local runs.
demo.launch(share=True, debug=True)