from llama_index.core import (
Settings,
load_index_from_storage,
StorageContext,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
import gradio as gr
import os
"""
Code adapted from Beebom article: "How to Train an AI Chatbot With Custom Knowledge Base Using ChatGPT API" by Arjun Sha
https://beebom.com/how-train-ai-chatbot-custom-knowledge-base-chatgpt-api/
Updated Feb 22, 2025 to use updated OpenAI API and Llama Index library
"""
# --- Model and chunking configuration ---
max_input_size = 4096   # context window advertised to llama_index (tokens)
num_outputs = 512       # maximum completion length for the LLM (tokens)
chunk_size_limit = 600  # maximum tokens per document chunk when splitting
chunk_overlap = int(chunk_size_limit * 0.1)  # 10% overlap between adjacent chunks

# LLM used to answer queries against the index.
llm = OpenAI(
    model="gpt-3.5-turbo",
    temperature=0.5,
    max_tokens=num_outputs
)

# Register global defaults on llama_index's Settings singleton so the
# query engine and node parser pick them up without explicit wiring.
Settings.llm = llm
Settings.node_parser = SentenceSplitter(
    chunk_size=chunk_size_limit,
    chunk_overlap=chunk_overlap
)
Settings.context_window = max_input_size
Settings.num_output = num_outputs
def retrieve_index(index_path):
    """Load a previously persisted vector index.

    Args:
        index_path: Directory the index was persisted to (``persist_dir``).

    Returns:
        The index object reconstructed from storage.
    """
    ctx = StorageContext.from_defaults(persist_dir=index_path)
    return load_index_from_storage(ctx)
def chatbot(input_text):
    """Stream an answer for *input_text* from the global query engine ``QE``.

    Yields the cumulative response text after each streamed token, which is
    the shape Gradio expects from a generator callback (the textbox is
    re-rendered with the full partial answer on every yield).
    """
    streamed = QE.query(input_text)
    tokens = []
    for token in streamed.response_gen:
        tokens.append(token)
        yield "".join(tokens)
if __name__ == "__main__":
    # Build the query engine before launching the UI; chatbot() reads the
    # module-global QE on every request.
    index = retrieve_index("dww_vectors")
    QE = index.as_query_engine(streaming=True)

    iface = gr.Interface(
        fn=chatbot,
        inputs=gr.components.Textbox(lines=7, label="Enter your text"),
        outputs="text",
        title="AI Chatbot for the Doing What Works Library",
    )
    # share=False keeps the app local (no public Gradio tunnel).
    iface.launch(share=False)