facehugger92 committed
Commit 4e47565 · verified · 1 Parent(s): 2e13e17

Update app.py

Files changed (1)
  1. app.py +26 -16
app.py CHANGED
@@ -1,30 +1,40 @@
- from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context, load_index_from_storage, StorageContext, PromptHelper
- from llama_index.llms import OpenAI
+ from llama_index.core import (
+     Settings,
+     load_index_from_storage,
+     StorageContext,
+ )
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_index.llms.openai import OpenAI
  import gradio as gr
- import sys
  import os
- 
- try:
-     from Config import openai_key
-     os.environ["OPENAI_API_KEY"] = openai_key
- except:
-     pass
+ from Config import openai_key
  
  """
  Code adopted from Beebom article: "How to Train an AI Chatbot With Custom Knowledge Base Using ChatGPT API" by Arjun Sha
  https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
+ Updated Feb 22, 2025 to use updated OpenAI API and Llama Index library
  """
  
  max_input_size = 4096
  num_outputs = 512
  chunk_size_limit = 600
- 
- prompt_helper = PromptHelper(context_window=max_input_size, num_output=num_outputs, chunk_overlap_ratio=0.1, chunk_size_limit=chunk_size_limit)
- 
- llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5, max_tokens=num_outputs)
- 
- service_context = ServiceContext.from_defaults(llm=llm, prompt_helper=prompt_helper)
- set_global_service_context(service_context)
+ chunk_overlap = int(chunk_size_limit * 0.1)
+ 
+ os.environ["OPENAI_API_KEY"] = openai_key
+ 
+ llm = OpenAI(
+     model="gpt-3.5-turbo",
+     temperature=0.5,
+     max_tokens=num_outputs
+ )
+ 
+ Settings.llm = llm
+ Settings.node_parser = SentenceSplitter(
+     chunk_size=chunk_size_limit,
+     chunk_overlap=chunk_overlap
+ )
+ Settings.context_window = max_input_size
+ Settings.num_output = num_outputs
  
  def retrieve_index(index_path):
      storage_context = StorageContext.from_defaults(persist_dir=index_path)
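The hunk ends inside retrieve_index, so the rest of the function is not shown in this diff. As a rough sketch only (assuming the usual llama_index persisted-index pattern; the persist directory name and the query call are invented for illustration, not taken from the commit), the Settings-based setup above is typically consumed like this:

# Sketch, not part of this commit: a plausible continuation of retrieve_index
# under the new Settings-based configuration.
from llama_index.core import StorageContext, load_index_from_storage  # already imported in the module above

def retrieve_index(index_path):
    # Rebuild storage from the persisted index directory.
    storage_context = StorageContext.from_defaults(persist_dir=index_path)
    # load_index_from_storage picks up Settings.llm, Settings.node_parser,
    # Settings.context_window and Settings.num_output set at module level.
    return load_index_from_storage(storage_context)

# Hypothetical usage: answer a question against the persisted index.
index = retrieve_index("index_storage")  # "index_storage" is an assumed path
response = index.as_query_engine().query("What does the knowledge base cover?")
print(response)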