import gradio as gr
import os
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import HuggingFaceEndpoint
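# The Hugging Face API token is expected in the environment (e.g. as a Space secret named "hf_token")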
if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv('hf_token')
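# Load the local knowledge-base text file the assistant answers from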
from langchain_community.document_loaders import TextLoader
loader = TextLoader('./Agentville Academy.txt')
documents = loader.load()
import textwrap
def wrap_text_preserve_newlines(text, width=110):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')
    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text
# Text Splitter
from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
# Embeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
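# No model_name given, so HuggingFaceEmbeddings falls back to its default sentence-transformers model
# (all-mpnet-base-v2) and computes the embeddings locally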
embeddings = HuggingFaceEmbeddings()
# Vectorstore: https://python.langchain.com/en/latest/modules/indexes/vectorstores.html
from langchain_community.vectorstores import FAISS
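# Build an in-memory FAISS index over the embedded chunks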
db = FAISS.from_documents(docs, embeddings)
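# Mistral-7B-Instruct-v0.2 is called remotely through the Hugging Face Inference API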
llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2", temperature=0.2, max_new_tokens=512)
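# "stuff" chain type: all retrieved chunks are stuffed into a single prompt for the LLM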
chain = load_qa_chain(llm, chain_type="stuff")
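# Retrieve the chunks most similar to the query and let the QA chain answer from them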
def get_answer(query):
    docs = db.similarity_search(query)
    response = chain.run(input_documents=docs, question=query)
    # return wrap_text_preserve_newlines(str(docs[0].page_content))
    return response
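# Gradio front end: free-text question in, Markdown answer out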
demo = gr.Interface(
    fn=get_answer,
    inputs="text",
    outputs=gr.Markdown(),
    theme=gr.themes.Glass(),
    title="Agentville Academy: Where your Agent knowledge goes from zero to expert!",
    examples=[
        'What is MemGPT?',
        'Generate a learning plan for mastering autonomous agents',
        'What is the difference between an LLM and an agent?',
    ],
)
demo.launch()