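# A minimal retrieval-augmented Gradio chatbot: answers come from an OpenAI
# chat model, and past exchanges are embedded into a Chroma collection so
# semantically similar ones can be retrieved as context for new prompts.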
import os
import gradio as gr
from openai import OpenAI
import pprint
import uuid
import chromadb
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
# Create the OpenAI client with the API key read from the environment
client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
pp = pprint.PrettyPrinter(indent=4)

# Create the Chroma client and collection once at start-up so stored
# conversations persist across chat turns; get_or_create_collection avoids
# the "collection already exists" error that create_collection can raise
chroma_client = chromadb.Client()
embedding_function = OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_KEY"), model_name=os.getenv("EMBEDDING_MODEL"))
collection = chroma_client.get_or_create_collection(name="conversations", embedding_function=embedding_function)


def generate_response(messages):
    # Send the conversation to the chat completions endpoint
    model_name = os.getenv("MODEL_NAME")
    response = client.chat.completions.create(model=model_name, messages=messages, temperature=0.5, max_tokens=250)
    # Log the request and token usage for debugging
    print("Request:")
    pp.pprint(messages)
    print(f"Completion tokens: {response.usage.completion_tokens}, Prompt tokens: {response.usage.prompt_tokens}, Total tokens: {response.usage.total_tokens}")
    return response.choices[0].message


def chat_interface(user_input):
    messages = [{"role": "system", "content": "You are a kind and friendly chatbot"}]
    # Pull up to two semantically similar past exchanges into the prompt as context
    if collection.count() > 0:
        results = collection.query(query_texts=[user_input], n_results=min(2, collection.count()))
        for res in results['documents'][0]:
            messages.append({"role": "user", "content": f"previous chat: {res}"})
    messages.append({"role": "user", "content": user_input})
    response = generate_response(messages)
    # Store this exchange so later queries can retrieve it as context
    collection.add(documents=[f"User: {user_input}\nAssistant: {response.content}"], ids=[str(uuid.uuid4())])
    return response.content


def main():
    # Launch a simple text-in/text-out Gradio UI for the chatbot
    interface = gr.Interface(fn=chat_interface, inputs="text", outputs="text", title="Chatbot Interface")
    interface.launch()


if __name__ == "__main__":
    main()