import gradio as gr
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain_community.chat_models import ChatOpenAI
from langchain_community.vectorstores.faiss import FAISS

# Load the prebuilt FAISS index from disk. It must have been built with the
# same embedding model that is used to embed queries here.
embeddings = OpenAIEmbeddings()
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
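# For reference, a minimal sketch of how the "faiss_index" directory could be
# (re)built from a plain-text corpus. The file name "articles.txt" and the
# chunking parameters are assumptions, not taken from this Space; run once
# offline and commit the resulting index alongside the app:
#
#   from langchain_community.document_loaders import TextLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#   documents = TextLoader("articles.txt").load()
#   splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
#   FAISS.from_documents(splitter.split_documents(documents),
#                        OpenAIEmbeddings()).save_local("faiss_index")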
#-----------------------------------------------------------------------------
def get_response_from_query(db, query, k=3):
    # Retrieve the k most similar chunks and concatenate their text so it can
    # be interpolated into the prompt.
    docs = db.similarity_search(query, k=k)
    docs_page_content = " ".join([d.page_content for d in docs])

    llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)

    prompt = PromptTemplate(
        input_variables=["question", "docs"],
        template="""
        A bot that is open to discussions about different cultural, philosophical,
        and political exchanges. I will do different analyses of the articles
        provided to me. Stay truthful, and if you weren't provided any resources,
        give your opinion only.

        Answer the following question: {question}
        By searching the following articles: {docs}

        Only use the factual information from the documents. Make sure to mention
        key phrases from the articles.

        If you feel like you don't have enough information to answer the question,
        say "I don't know".
        """,
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    # A retrieval chain (e.g. RetrievalQAWithSourcesChain.from_chain_type with
    # chain_type="stuff" and retriever=db.as_retriever()) could return sources
    # automatically; here the retrieved docs are returned explicitly instead,
    # so LLMChain.run() is passed only the prompt's input variables.
    response = chain.run(question=query, docs=docs_page_content)
    r_text = str(response)
    # Evaluation step: ask the model to judge whether its own answer is
    # faithful to the retrieved documents.
    prompt_eval = PromptTemplate(
        input_variables=["answer", "docs"],
        template="""
        Your job is to evaluate whether the response to a given context is faithful.
        For the following: {answer}
        By searching the following article: {docs}
        Give a reason why they are similar or not, starting with a Yes or a No.
        """,
    )
    chain_part_2 = LLMChain(llm=llm, prompt=prompt_eval)
    evals = chain_part_2.run(answer=r_text, docs=docs_page_content)

    return response, docs, evals
def greet(query):
    answer, sources, evals = get_response_from_query(db, query, 2)
    return answer, sources, evals
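# Quick smoke test before launching the UI (the query string below is just an
# illustrative example):
#
#   print(greet("What does Cicero say about friendship?")[0])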
demo = gr.Interface(
    fn=greet,
    title="cicero-semantic-search",
    inputs="text",
    outputs=[gr.components.Textbox(lines=3, label="Response"),
             gr.components.Textbox(lines=3, label="Source"),
             gr.components.Textbox(lines=3, label="Evaluation")],
)
demo.launch(share=True, debug=True)