fastx committed on
Commit
dd9022d
·
1 Parent(s): 2ba8091

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -28
app.py CHANGED
@@ -1,50 +1,34 @@
1
  import gradio as gr
2
- import openai
3
  from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
4
- from langchain.chat_models import ChatOpenAI
5
  import sys
6
- import os
7
  from IPython.display import Markdown, display
8
 
9
- # Define API key globally
10
- api_key = "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"
11
-
12
  def construct_index(directory_path):
13
- # set maximum input size
14
  max_input_size = 4096
15
- # set number of output tokens
16
  num_outputs = 2000
17
- # set maximum chunk overlap
18
  max_chunk_overlap = 20
19
- # set chunk size limit
20
  chunk_size_limit = 600
21
 
22
- # define LLM
23
  llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
24
  prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
25
 
26
- documents = SimpleDirectoryReader(directory_path).load_data()
27
-
28
- index = GPTSimpleVectorIndex.from_documents(documents)
29
 
30
- index.save_to_disk('index.json')
31
 
32
- return index
33
 
34
- def ask_ai(question, api_key):
35
- # Use global API key variable
36
- os.environ["OPENAI_API_KEY"] = api_key
37
- index = GPTSimpleVectorIndex.load_from_disk('index.json')
38
- response = index.query(question, response_mode="compact")
39
- return response.response
40
 
41
  construct_index("data")
42
 
43
- # Create Gradio interface to prompt for API key
44
- api_key_input = gr.inputs.Textbox(label="Enter your OpenAI API key:")
45
 
46
- # Define the interface
47
- iface = gr.Interface(fn=ask_ai, inputs=["text", api_key_input], outputs="text" ,title="Jim's Chatbot")
48
 
49
- # Start the interface
50
- iface.launch()
 
1
  import gradio as gr
2
+ import json
3
  from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
4
+ from langchain import OpenAI
5
  import sys
 
6
  from IPython.display import Markdown, display
7
 
 
 
 
8
def construct_index(directory_path):
    """Build a GPTSimpleVectorIndex from the documents under *directory_path*,
    persist it to ``index.json``, and return it.

    Parameters
    ----------
    directory_path : str
        Folder whose files are loaded via ``SimpleDirectoryReader``.

    Returns
    -------
    GPTSimpleVectorIndex
        The freshly built index (also written to ``index.json`` as a side
        effect so later queries can reload it from disk).
    """
    # Prompt-sizing knobs for PromptHelper.
    max_input_size = 4096    # max tokens the model accepts per request
    num_outputs = 2000       # tokens reserved for the completion
    max_chunk_overlap = 20   # overlap between consecutive text chunks
    chunk_size_limit = 600   # max tokens per chunk

    # NOTE(review): "gpt-3.5-turbo" is a chat model; langchain's
    # completion-style `OpenAI` wrapper may warn or misbehave with it —
    # confirm whether ChatOpenAI was intended. Left as-is to match the
    # file's existing `from langchain import OpenAI`.
    llm_predictor = LLMPredictor(
        llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo",
                   max_tokens=num_outputs))
    prompt_helper = PromptHelper(max_input_size, num_outputs,
                                 max_chunk_overlap,
                                 chunk_size_limit=chunk_size_limit)

    # Bug fix: the previous body queried an undefined `index` with an
    # undefined `query` and never built anything. Restore the actual
    # index construction (as in the prior revision of this function).
    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTSimpleVectorIndex.from_documents(documents)

    # Persist so ask_ai() can reload the index without rebuilding.
    index.save_to_disk('index.json')

    return index
 
24
 
 
25
 
 
26
 
27
import os

# SECURITY: the previous revision hard-coded an OpenAI secret key here.
# A key committed to source control must be treated as leaked — revoke it
# and supply a fresh one via the environment instead of the source tree.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError(
        "Set the OPENAI_API_KEY environment variable before launching.")

# Build (and persist to index.json) the vector index once at startup
# from the local ./data folder.
construct_index("data")


def ask_ai(question):
    """Answer *question* against the on-disk index built at startup.

    Reloads ``index.json`` on every call and runs a compact-mode query,
    returning the plain response text for the Gradio textbox.
    """
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(question, response_mode="compact")
    return response.response


# Bug fix: this revision deleted ask_ai() but still passed it to
# gr.Interface, which would raise NameError at import time — the
# function is restored above so the interface has a real callable.
iface = gr.Interface(fn=ask_ai, inputs="text", outputs="text", title="Chatbot")

# Start the web UI (blocking call).
iface.launch()