# Gradio + LangChain grant-writing assistant.
# Upload PDFs, build a DeepLake vectorstore over them, and chat with an
# OpenAI model (gpt-3.5-turbo or gpt-4) using retrieval over the documents.
import os
import random
import time

import gradio as gr
import openai
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFDirectoryLoader, TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import DeepLake
def set_api_key(key):
    """Store the supplied OpenAI API key in the process environment.

    Returns a confirmation string for display in the status textbox.
    """
    os.environ["OPENAI_API_KEY"] = key
    confirmation = f"Your API Key has been set to: {key}"
    return confirmation
def reset_api_key():
    """Blank out the OpenAI API key held in the environment.

    Returns a confirmation string for display in the status textbox.
    """
    os.environ.update({"OPENAI_API_KEY": ""})
    return "Your API Key has been reset"
def get_api_key():
    """Return the current OpenAI API key from the environment (None if unset)."""
    return os.getenv("OPENAI_API_KEY")
def set_model(model):
    """Record the chosen chat model name in the environment.

    Returns a confirmation string for display in the status textbox.
    """
    os.environ["OPENAI_MODEL"] = model
    status = f"{model} selected"
    return status
def get_model():
    """Return the model name previously stored by set_model (None if unset)."""
    return os.getenv("OPENAI_MODEL")
def upload_file(files):
    """Map the uploaded Gradio file objects to their temp-file paths.

    Gradio's UploadButton hands over objects exposing a ``.name`` attribute
    holding the path of the saved temporary file.
    """
    paths = []
    for uploaded in files:
        paths.append(uploaded.name)
    return paths
def create_vectorstore(files):
    """Build a DeepLake vectorstore from the uploaded PDF file(s).

    The upload button uses ``file_count="multiple"``, so ``files`` may be a
    list of uploaded-file objects rather than a single object — the original
    ``files.name`` broke on lists. PyPDFDirectoryLoader also expects a
    *directory*, not a file path, so we resolve the directory containing the
    upload(s) from the first file's path.

    Returns a status string for display in the UI.
    """
    # Normalise single-file vs multi-file input down to one representative path.
    if isinstance(files, list):
        first = files[0]
    else:
        first = files
    first_path = first.name if hasattr(first, "name") else first
    # Gradio stores uploads together in one temp directory; scan that directory.
    pdf_dir = os.path.dirname(first_path) or "."
    pdf_loader = PyPDFDirectoryLoader(pdf_dir)
    pdf_docs = pdf_loader.load_and_split()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(pdf_docs)
    embeddings = OpenAIEmbeddings()
    # overwrite=True rebuilds the dataset so repeated uploads don't accumulate stale docs.
    DeepLake.from_documents(texts, dataset_path="./documentation_db", embedding=embeddings, overwrite=True)
    return "Vectorstore Successfully Created"
def respond(message, chat_history):
    """Answer *message* with retrieval over the documentation vectorstore.

    Fixes two history-handling bugs in the original: it truncated the
    incoming history to only its first exchange, and then replaced the whole
    history with just the newest turn, so the chatbot never accumulated a
    conversation. The full history is now passed to the chain and the new
    turn is appended. Also drops the pointless 1-second sleep.

    Returns ("", updated_history) so Gradio clears the input textbox and
    refreshes the chatbot display.
    """
    embeddings = OpenAIEmbeddings()
    # Re-open the persisted vectorstore read-only; create_vectorstore built it.
    db = DeepLake(dataset_path="./documentation_db", embedding_function=embeddings, read_only=True)
    retriever = db.as_retriever(search_kwargs={
        "distance_metric": "cos",
        "fetch_k": 10,
        "maximal_marginal_relevance": True,
        "k": 10,
    })
    # Gradio delivers history turns as lists; the chain expects (user, bot) tuples.
    history_tuples = [tuple(turn) for turn in chat_history] if chat_history else []
    model = ChatOpenAI(model=get_model())
    qa = ConversationalRetrievalChain.from_llm(model, retriever)
    result = qa({"question": message, "chat_history": history_tuples})
    # Append (was: overwrite) so the whole conversation stays visible.
    updated_history = history_tuples + [(message, result["answer"])]
    return "", updated_history
# Top-level Gradio UI: API-key management, model selection, PDF upload /
# vectorstore creation, and the retrieval-augmented chat itself.
with gr.Blocks() as demo:
    # --- API key management ------------------------------------------------
    with gr.Row():
        api_input = gr.Textbox(label="API Key",
                               placeholder="Please provide your OpenAI API key here.")
        # Fixed typo in placeholder: "has not be set" -> "has not been set".
        api_key_status = gr.Textbox(label="API Key Status",
                                    placeholder="Your API Key has not been set yet. Please enter your key.",
                                    interactive=False)
        api_submit_button = gr.Button("Submit")
        api_submit_button.click(set_api_key, inputs=api_input, outputs=api_key_status)
        api_reset_button = gr.Button("Clear API Key from session")
        api_reset_button.click(reset_api_key, outputs=api_key_status)
    # --- Model selection and document upload -------------------------------
    with gr.Row():
        with gr.Column():
            model_selection = gr.Dropdown(
                ["gpt-3.5-turbo", "gpt-4"], label="Model Selection",
                info="Please ensure you provide the API Key that corresponds to the Model you select!"
            )
            model_submit_button = gr.Button("Submit Model Selection")
            model_status = gr.Textbox(label="Selected Model", interactive=False, lines=4)
            model_submit_button.click(set_model, inputs=model_selection, outputs=model_status)
            file_output = gr.File(label="Uploaded files - Please note these files are persistent and will not be automatically deleted")
            upload_button = gr.UploadButton("Click to Upload a PDF File", file_types=["pdf"], file_count="multiple")
            upload_button.upload(upload_file, upload_button, file_output)
            create_vectorstore_button = gr.Button("Click to create the vectorstore for your uploaded documents")
            db_output = gr.Textbox(label="Vectorstore Status")
            create_vectorstore_button.click(create_vectorstore, inputs=file_output, outputs=db_output)
    # --- Chat interface ----------------------------------------------------
    chatbot = gr.Chatbot(label="ChatGPT Powered Grant Writing Assistant")
    msg = gr.Textbox(label="User Prompt", placeholder="Your Query Here")
    clear = gr.Button("Clear")
    # respond returns ("", history): clears the prompt box, updates the chat.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    # Clearing just resets the chatbot component; state lives in the component.
    clear.click(lambda: None, None, chatbot, queue=False)
demo.launch()