fastx committed
Commit 42c37e7 · 1 Parent(s): 79bee64

Update app.py

Files changed (1): app.py (+18 -11)
app.py CHANGED
@@ -5,12 +5,18 @@ import sys
 import os
 from IPython.display import Markdown, display
 
-def train_with_data(directory_path):
+
+def construct_index(directory_path):
+    # set maximum input size
     max_input_size = 4096
+    # set number of output tokens
     num_outputs = 2000
+    # set maximum chunk overlap
     max_chunk_overlap = 20
+    # set chunk size limit
     chunk_size_limit = 600
 
+    # define LLM
     llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
     prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
 
@@ -22,6 +28,9 @@ def train_with_data(directory_path):
 
     return index
 
+
+
+
 '''
 def ask_ai():
     index = GPTSimpleVectorIndex.load_from_disk('index.json')
@@ -30,22 +39,20 @@ def ask_ai():
     response = index.query(query, response_mode="compact")
     display(Markdown(f"Response: <b>{response.response}</b>"))
 '''
-def ask_ai(query, api_key):
-    # Set default API key if user does not enter a key
-    if api_key == "":
-        api_key = "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"
-
-    os.environ["OPENAI_API_KEY"] = api_key
-
+# Define the ask_ai() function
+def ask_ai(question,api):
+    if api == "":
+        api = "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"
+    os.environ["OPENAI_API_KEY"] = api
     index = GPTSimpleVectorIndex.load_from_disk('index.json')
-    response = index.query(query, response_mode="compact")
+    response = index.query(question, response_mode="compact")
     return response.response
 
 
-train_with_data("data")
+construct_index("context_data")
 
 # Create Gradio interface to prompt for API key
-api_key = gr.inputs.Textbox(label="Enter your OpenAI API key (leave blank to use default key):")
+api_key = gr.inputs.Textbox(label="Enter your OpenAI API key:")
 
 # Define the interface
 iface = gr.Interface(fn=ask_ai, inputs=["text", api_key], outputs="text" ,title="Jim's Chatbot")
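
Note that the hunks above show only the head and tail of construct_index; the statements that actually read the data directory, build the vector index, and write index.json sit between the hunks and are untouched by this commit. For reference, here is a minimal sketch of that omitted middle, assuming the gpt_index/llama_index 0.x API already used elsewhere in the file (SimpleDirectoryReader, GPTSimpleVectorIndex, save_to_disk) and the langchain OpenAI wrapper; the real app.py may differ in detail:

from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper, SimpleDirectoryReader

def construct_index(directory_path):
    # parameters and predictor, exactly as in the first hunk above
    max_input_size = 4096
    num_outputs = 2000
    max_chunk_overlap = 20
    chunk_size_limit = 600
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # assumed: read every file under directory_path into documents
    documents = SimpleDirectoryReader(directory_path).load_data()

    # assumed: build the vector index and persist it so ask_ai() can reload it
    index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index.save_to_disk('index.json')

    return index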
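Likewise, the last context line only defines the gr.Interface; the diff ends before anything starts it, so whether or how launch() is called is not visible here. A hedged sketch of that final step with plain Gradio 3.x calls (on a Hugging Face Space a bare launch() is normally sufficient):

import gradio as gr

# prompt for the key on each request and pass it to ask_ai() together with the question
api_key = gr.inputs.Textbox(label="Enter your OpenAI API key:")
iface = gr.Interface(fn=ask_ai, inputs=["text", api_key], outputs="text", title="Jim's Chatbot")

# start the web app; construct_index("context_data") has already built index.json at import time
iface.launch()

With this wiring, each submission calls ask_ai(question, api), which sets OPENAI_API_KEY, reloads index.json, and returns response.response as the text output.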