from openai import OpenAI
import gradio as gr
import os, json
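
# Example config.json layout, assuming the same two keys read below
# (values shown here are placeholders):
# {
#     "OPENAI_API_KEY": "sk-...",
#     "SYSTEM_PROMPT": "You are a helpful assistant."
# }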

# Attempt to load configuration from config.json
try:
    with open('config.json') as config_file:
        config = json.load(config_file)
    OPENAI_API_KEY = config.get("OPENAI_API_KEY")
    SYSTEM_PROMPT = config.get("SYSTEM_PROMPT")
except FileNotFoundError:
    # If config.json is not found, fall back to environment variables
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")

# The API key is required; the system prompt falls back to a default if unset
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY is not set in config.json or as an environment variable.")
if not SYSTEM_PROMPT:
    SYSTEM_PROMPT = "This is a default system prompt."

client = OpenAI(api_key=OPENAI_API_KEY)

system_prompt = {
    "role": "system",
    "content": SYSTEM_PROMPT
}

MODEL = "gpt-3.5-turbo"
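
# predict() rebuilds the conversation in the OpenAI chat format: the system
# prompt first, then Gradio's (user, assistant) history pairs, then the new
# user message. It streams the reply, yielding the growing partial answer.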
def predict(message, history):
    history_openai_format = [system_prompt]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model=MODEL,
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    # Accumulate streamed chunks and yield the partial reply so the UI updates live
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message

# share=True also publishes a temporary public Gradio link alongside the local server
gr.ChatInterface(predict).launch(share=True)