fastx committed
Commit 3ee0620 · 1 parent: 22a1785

Update app.py

Files changed (1)
app.py (+14, -9)
app.py CHANGED
@@ -1,37 +1,42 @@
 import gradio as gr
 import json
 import os
-import openai
 from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
 from langchain import OpenAI
 import sys
 
 from IPython.display import Markdown, display
 
+
+
+
 def construct_index(directory_path):
 
     max_input_size = 4096
+
     num_outputs = 2000
+
     max_chunk_overlap = 20
+
     chunk_size_limit = 600
 
+
     llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
     prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
+
 
-def ask_ai(question, api_key):
-    os.environ["OPENAI_API_KEY"] = api_key
+def ask_ai(question,api):
     index = GPTSimpleVectorIndex.load_from_disk('index.json')
     response = index.query(question, response_mode="compact")
     return response.response
 
-construct_index("data")
 
-api_key_input = gr.inputs.Textbox(label="Enter your OpenAI API Key")
+os.environ["OPENAI_API_KEY"] = "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"
+
+construct_index("data")
 
-question_input = gr.inputs.Textbox(label="Ask a question")
 
-output_text = gr.outputs.Textbox(label="Answer")
+iface = gr.Interface(fn=ask_ai, inputs="text", outputs="text" ,title="Chatbot")
 
-iface = gr.Interface(fn=ask_ai, inputs=[question_input, api_key_input], outputs=output_text, title="OpenAI Chatbot")
 
-iface.launch()
+iface.launch()
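
As committed, ask_ai(question,api) takes two parameters while the interface declares a single "text" input, so the api argument never receives a value and a query from the UI would likely fail with a missing-argument error; the OpenAI key is also hardcoded, and construct_index() as shown in this diff never builds or saves index.json. A minimal sketch of how the pieces could fit together, assuming index.json already exists in the repo and the key is supplied through an environment variable (both assumptions, not part of this commit):

import os

import gradio as gr
from llama_index import GPTSimpleVectorIndex

# Assumption: the key is provided via the environment (e.g. a Space secret)
# rather than hardcoded in app.py.
assert os.environ.get("OPENAI_API_KEY"), "set OPENAI_API_KEY before launching"

def ask_ai(question):
    # Assumes index.json was built and committed separately; the
    # construct_index() in this commit does not create or save it.
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(question, response_mode="compact")
    return response.response

# One text input now matches the single ask_ai parameter.
iface = gr.Interface(fn=ask_ai, inputs="text", outputs="text", title="Chatbot")
iface.launch()

Reading the key from the environment also keeps it out of version control, which matters for a public Space.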