import gradio as gr
import logging, os, sys, time
from agent_langchain import agent_langchain
from agent_llamaindex import agent_llamaindex
from openai import OpenAI
from trace import trace_wandb
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
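
# Agent options exposed in the UI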
AGENT_OFF = "Off"
AGENT_LANGCHAIN = "LangChain"
AGENT_LLAMAINDEX = "LlamaIndex"
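
# Shared model configuration, used by both agents and the direct OpenAI call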
config = {
    "model": "gpt-4o",
    "temperature": 0
}

logging.basicConfig(stream = sys.stdout, level = logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream = sys.stdout))
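
# Validate the inputs, run the selected agent (or a plain chat completion), and trace the call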
def invoke(openai_api_key, prompt, agent_option):
    if (openai_api_key == ""):
        raise gr.Error("OpenAI API Key is required.")
    if (prompt == ""):
        raise gr.Error("Prompt is required.")
    if (agent_option is None):
        raise gr.Error("Use Agent is required.")

    os.environ["OPENAI_API_KEY"] = openai_api_key

    completion = ""
    result = ""
    callback = ""
    err_msg = ""

    try:
        start_time_ms = round(time.time() * 1000)

        # Route the prompt to the selected agent, or call the OpenAI API directly when agents are off
        if (agent_option == AGENT_LANGCHAIN):
            completion, callback = agent_langchain(
                config,
                prompt
            )

            result = completion["output"]
        elif (agent_option == AGENT_LLAMAINDEX):
            result = agent_llamaindex(
                config,
                prompt
            )
        else:
            client = OpenAI()

            completion = client.chat.completions.create(
                messages = [{"role": "user", "content": prompt}],
                model = config["model"],
                temperature = config["temperature"]
            )

            callback = completion.usage
            result = completion.choices[0].message.content
    except Exception as e:
        err_msg = e

        raise gr.Error(e)
    finally:
        end_time_ms = round(time.time() * 1000)

        # Trace the call in Weights & Biases, whether it succeeded or failed
        trace_wandb(
            config,
            agent_option,
            prompt,
            completion,
            result,
            callback,
            err_msg,
            start_time_ms,
            end_time_ms
        )

    return result

gr.close_all()
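
# Gradio UI: OpenAI API key, prompt, and agent selector in; completion text out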
demo = gr.Interface(
    fn = invoke,
    inputs = [gr.Textbox(label = "OpenAI API Key", type = "password", lines = 1),
              gr.Textbox(label = "Prompt", lines = 1,
                         value = "How does current weather in San Francisco and Paris compare in metric and imperial system? Answer in JSON format and include today's date."),
              gr.Radio([AGENT_OFF, AGENT_LANGCHAIN, AGENT_LLAMAINDEX], label = "Use Agent", value = AGENT_LANGCHAIN)],
    outputs = [gr.Textbox(label = "Completion", value = os.environ["OUTPUT"])],
    title = "Agentic Reasoning Application",
    description = os.environ["DESCRIPTION"]
)

demo.launch()