import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from llama_index import (
    GPTSimpleVectorIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
)

# Define API key globally; replace with your own key (never commit a real key)
api_key = "sk-..."
os.environ["OPENAI_API_KEY"] = api_key


def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 2000
    # set maximum chunk overlap
    max_chunk_overlap = 20
    # set chunk size limit
    chunk_size_limit = 600

    # define LLM (gpt-3.5-turbo is a chat model, so use ChatOpenAI)
    llm_predictor = LLMPredictor(
        llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs)
    )
    prompt_helper = PromptHelper(
        max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit
    )
    # bundle the predictor and prompt helper so the index actually uses them
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, prompt_helper=prompt_helper
    )

    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
    index.save_to_disk("index.json")
    return index


def ask_ai(question, api_key):
    # Use the key entered in the Gradio textbox for this query
    os.environ["OPENAI_API_KEY"] = api_key
    index = GPTSimpleVectorIndex.load_from_disk("index.json")
    response = index.query(question, response_mode="compact")
    return response.response


# Build the index from the documents in the "data" directory
construct_index("data")

# Gradio textbox that prompts the user for their OpenAI API key
api_key_input = gr.Textbox(label="Enter your OpenAI API key:", type="password")

# Define the interface
iface = gr.Interface(
    fn=ask_ai,
    inputs=["text", api_key_input],
    outputs="text",
    title="Jim's Chatbot",
)

# Start the interface
iface.launch()
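# Note: this script assumes the pre-0.6 llama_index API (GPTSimpleVectorIndex,
# save_to_disk/load_from_disk, index.query) and Gradio 3.x (gr.Textbox); newer
# releases renamed these interfaces, so pin versions accordingly (for example,
# llama-index<0.6) if you want to run it as written.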