Update app.py
app.py CHANGED
@@ -1,30 +1,40 @@
-from llama_index import
-
+from llama_index.core import (
+    Settings,
+    load_index_from_storage,
+    StorageContext,
+)
+from llama_index.core.node_parser import SentenceSplitter
+from llama_index.llms.openai import OpenAI
 import gradio as gr
-import sys
 import os
-
-try:
-    from Config import openai_key
-    os.environ["OPENAI_API_KEY"] = openai_key
-except:
-    pass
+from Config import openai_key
 
 """
 Code adopted from Beebom article: "How to Train an AI Chatbot With Custom Knowledge Base Using ChatGPT API" by Arjun Sha
 https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
+Updated Feb 22, 2025 to use updated OpenAI API and Llama Index library
 """
 
 max_input_size = 4096
 num_outputs = 512
 chunk_size_limit = 600
-
-
-
-
-
-
-
+chunk_overlap = int(chunk_size_limit * 0.1)
+
+os.environ["OPENAI_API_KEY"] = openai_key
+
+llm = OpenAI(
+    model="gpt-3.5-turbo",
+    temperature=0.5,
+    max_tokens=num_outputs
+)
+
+Settings.llm = llm
+Settings.node_parser = SentenceSplitter(
+    chunk_size=chunk_size_limit,
+    chunk_overlap=chunk_overlap
+)
+Settings.context_window = max_input_size
+Settings.num_output = num_outputs
 
 def retrieve_index(index_path):
     storage_context = StorageContext.from_defaults(persist_dir=index_path)
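The new imports bring in load_index_from_storage, but the diff is cut off inside retrieve_index before it is used. Below is a minimal sketch of how the function presumably continues and how the loaded index could be wired into the Gradio app. It assumes the index was persisted to a local "index" directory; the chatbot helper and the Gradio interface details are illustrative assumptions, not part of the commit.

from llama_index.core import StorageContext, load_index_from_storage
import gradio as gr

def retrieve_index(index_path):
    # Rebuild the storage context from the persisted directory and load the index.
    storage_context = StorageContext.from_defaults(persist_dir=index_path)
    return load_index_from_storage(storage_context)

# Hypothetical serving code (not shown in the commit).
index = retrieve_index("index")          # assumed persist directory
query_engine = index.as_query_engine()   # picks up the Settings configured above

def chatbot(question):
    response = query_engine.query(question)
    return str(response)

iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(lines=3, label="Ask a question"),
    outputs="text",
    title="Custom knowledge chatbot",
)
iface.launch()

Because the LLM and node parser are registered on Settings, as_query_engine() needs no extra arguments to use the gpt-3.5-turbo configuration set earlier in app.py.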