from llama_index.core import (
Settings,
load_index_from_storage,
StorageContext,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
import gradio as gr
import os
"""
Code adapted from the Beebom article: "How to Train an AI Chatbot With Custom Knowledge Base Using ChatGPT API" by Arjun Sha
https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
Updated Feb 22, 2025 to use updated OpenAI API and Llama Index library
"""
# Model and chunking parameters
max_input_size = 4096
num_outputs = 512
chunk_size_limit = 600
chunk_overlap = int(chunk_size_limit * 0.1)

# LLM used to answer queries against the index
llm = OpenAI(
    model="gpt-3.5-turbo",
    temperature=0.5,
    max_tokens=num_outputs,
)

# Global Llama Index settings: LLM, sentence-based chunking, and token limits
Settings.llm = llm
Settings.node_parser = SentenceSplitter(
    chunk_size=chunk_size_limit,
    chunk_overlap=chunk_overlap,
)
Settings.context_window = max_input_size
Settings.num_output = num_outputs
def retrieve_index(index_path):
    """Load a previously persisted vector index from disk."""
    storage_context = StorageContext.from_defaults(persist_dir=index_path)
    index = load_index_from_storage(storage_context)
    return index
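
# Note: this app assumes the "dww_vectors" directory already contains a persisted
# index. A minimal sketch of how such an index could be built (assuming the source
# documents live in a hypothetical "docs" directory) might look like:
#
#     from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
#
#     documents = SimpleDirectoryReader("docs").load_data()
#     index = VectorStoreIndex.from_documents(documents)
#     index.storage_context.persist(persist_dir="dww_vectors")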
def chatbot(input_text):
    """Query the index and stream the growing response back to Gradio."""
    response = QE.query(input_text)
    response_stream = ""
    for r in response.response_gen:
        response_stream += r
        yield response_stream
if __name__ == "__main__":
    # Build the Gradio UI
    iface = gr.Interface(
        fn=chatbot,
        inputs=gr.components.Textbox(lines=7, label="Enter your text"),
        outputs="text",
        title="AI Chatbot for the Doing What Works Library",
    )
    # Load the persisted vector index and create a streaming query engine
    index = retrieve_index("dww_vectors")
    QE = index.as_query_engine(streaming=True)
    iface.launch(share=False)