import logging
import os

import gradio as gr
import openai
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Configure logging: INFO-level, timestamped messages written to the console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()],
)

# Read the OpenAI API key from the environment (Space secret "OpenAI_ApiKey").
openai.api_key = os.environ["OpenAI_ApiKey"]

# Use a local HuggingFace embedding model instead of the OpenAI default.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# Load documents from the "data" directory and build an in-memory vector index.
logging.info("Loading documents.")
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

def greet(question):
    """Answer a question by querying the vector index over the loaded documents."""
    logging.info("Executing greet.")
    return str(query_engine.query(question))


demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
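
Once the app is up, the single endpoint exposed by gr.Interface can also be called programmatically with gradio_client. A minimal sketch, assuming the app is reachable at Gradio's default local address (a deployed Space would use its own URL) and using a placeholder question:

from gradio_client import Client

# Connect to the running Gradio app (address is an assumption for a local run).
client = Client("http://127.0.0.1:7860/")

# gr.Interface registers its function under the default endpoint name "/predict".
result = client.predict("What do the documents in the data folder cover?", api_name="/predict")
print(result)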