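"""Gradio front-end for Claude Space.

Two tabs: "Claude Evals" (one-shot question answering) and "Claude Chat"
(streaming chat). Both call AnthropicCustom from claude_space.ai, which wraps
the Anthropic SDK; running this module serves the app on 0.0.0.0:7860.
"""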
from typing import AsyncGenerator
import gradio as gr
from anthropic import AI_PROMPT, HUMAN_PROMPT
from gradio.components import Checkbox, Dropdown, IOComponent, Markdown, Textbox
from gradio.utils import async_iteration
from claude_space.ai import AnthropicCustom
from claude_space.const import (
ClaudeDefaultFormatter,
ClaudeModels,
ModelTokenLength,
Prompts,
)
from claude_space.settings import settings
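
# Rolling transcript of past turns, used when the "Memory" checkbox is enabled.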
conversation_history = ""


async def interact_with_ai(
user_question, token, model, token_length, prompt, prompt_input, memory
):
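    """Stream Claude's answer for the eval tab, then record the turn in memory."""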
global conversation_history
    if (token is None or token == "") and settings.ANTHROPIC_API_KEY is None:
        raise ValueError(
            "You have not provided an API key. Please provide one in the textbox."
        )
    # Build the final prompt: optional conversation memory, the question, and
    # either the preset prompt or a custom override typed into the prompt box.
    memory_text = conversation_history if memory else ""
    active_prompt = Prompts[prompt].value
    if prompt_input != active_prompt:
        active_prompt = prompt_input
    final_prompt = ClaudeDefaultFormatter.memory.value.format(
        memory=memory_text, question=user_question, prompt=active_prompt
    )
    anth = AnthropicCustom(
        api_key=token, model=model, max_tokens=token_length, prompt=final_prompt
    )
    response_accumulated = ""
    async for chunk in anth.get_anthropic_response_async():
        response_accumulated += chunk
        yield response_accumulated
    # Record the completed turn once, after the stream ends, rather than
    # re-appending a partial turn on every chunk.
    conversation_history = (
        f"{conversation_history} {HUMAN_PROMPT} {user_question}"
        f" {AI_PROMPT} {response_accumulated}"
    )


async def chat_with_ai(
message,
history,
token,
model,
token_length,
prompt,
prompt_input,
memory,
):
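    """Streaming chat handler; `history` holds [user, assistant] message pairs."""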
global conversation_history
    if (token is None or token == "") and settings.ANTHROPIC_API_KEY is None:
        raise ValueError(
            "You have not provided an API key. Please provide one in the textbox."
        )
    if memory:
        # Rebuild the transcript from the visible chat history so repeated
        # submits don't duplicate earlier turns.
        conversation_history = ""
        for user_question, response_accumulated in history:
            conversation_history = (
                f"{conversation_history} {HUMAN_PROMPT} {user_question}"
                f" {AI_PROMPT} {response_accumulated}"
            )
    memory_text = conversation_history if memory else ""
    active_prompt = Prompts[prompt].value
    if prompt_input != active_prompt:
        active_prompt = prompt_input
    final_prompt = ClaudeDefaultFormatter.memory.value.format(
        memory=memory_text, question=message, prompt=active_prompt
    )
    anth = AnthropicCustom(
        api_key=token, model=model, max_tokens=token_length, prompt=final_prompt
    )
response_accumulated = ""
    async for chunk in anth.get_anthropic_response_async():
        response_accumulated += chunk
        yield response_accumulated


def add_file(history, file):
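    """Append an uploaded file to the chat history as a user turn with no reply."""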
history = history + [((file.name,), None)]
return history


def clear_and_save_textbox(message: str) -> tuple[str, str]:
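    """Clear the message box and stash its contents in the saved_input state."""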
return "", message


def display_input(
message: str, history: list[list[str | None]]
) -> tuple[list[list[str | None]], list[list[str | None]]]:
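    """Echo the user's message into the chat window before the reply streams in."""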
history.append([message, None])
return history, history


async def stream_fn(
message: str,
history_with_input: list[list[str | None]],
*args,
) -> AsyncGenerator:
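    """Run chat_with_ai and stream each partial reply into the Chatbot.

    Mirrors the streaming loop of gradio's ChatInterface.
    """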
history = history_with_input[:-1]
generator = chat_with_ai(message, history, *args)
try:
first_response = await async_iteration(generator)
update = history + [[message, first_response]]
yield update, update
    except (StopIteration, StopAsyncIteration):
        # gradio's async_iteration has signaled exhaustion with either
        # exception across versions, so catch both.
        update = history + [[message, None]]
        yield update, update
async for response in generator:
update = history + [[message, response]]
yield update, update
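

# Chat UI ("Claude Chat" tab): a streaming Chatbot with optional memory.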
with gr.Blocks() as cface:
with gr.Row(variant="compact", equal_height=True):
with gr.Column(scale=1):
t: IOComponent = Textbox(
label="Token",
info="You'll get this token from Anthropic console and this is mandatory",
placeholder="Enter a token here",
type="password",
)
m: IOComponent = Dropdown(
choices=[model.value for model in ClaudeModels],
label="Model",
value=[model.value for model in ClaudeModels][0],
)
dL: IOComponent = Dropdown(
choices=[token.value for token in ModelTokenLength],
label="Token Length",
value=[token.value for token in ModelTokenLength][0],
)
pd: IOComponent = Dropdown(
choices=list(Prompts.__members__.keys()),
label="Prompt",
value=list(Prompts.__members__.keys())[0],
)
pi: IOComponent = Textbox(
label="Custom Prompt",
placeholder="Enter a custom prompt here",
lines=3,
value=Prompts[pd.value].value,
)
c: IOComponent = Checkbox(label="Memory", value=True)
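            # Note: this Send button is not wired to a handler; messages are
            # submitted by pressing Enter in the chat textbox.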
btn: IOComponent = gr.Button("Send")
with gr.Column(scale=3):
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
)
with gr.Row():
msg = gr.Textbox(
scale=4,
show_label=False,
placeholder="Enter text and press enter, or upload an image",
container=False,
)
                upload_btn = gr.UploadButton("📁", file_types=["image"])
clear = gr.ClearButton([msg, chatbot])
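
    # Keep the custom-prompt textbox in sync with the selected preset.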
pd.change(
lambda choice: gr.update(value=Prompts[choice].value), inputs=pd, outputs=pi
)
saved_input = gr.State()
chatbot_state = gr.State([])
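
    # Submit chain: clear the textbox, echo the user message, then stream the reply.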
text_msg = (
msg.submit(
clear_and_save_textbox,
[msg],
[msg, saved_input],
api_name=False,
queue=False,
)
.then(
display_input,
[saved_input, chatbot_state],
[chatbot, chatbot_state],
api_name=False,
queue=False,
)
.then(
stream_fn,
[saved_input, chatbot_state, t, m, dL, pd, pi, c],
[chatbot, chatbot_state],
api_name=False,
)
)
    # file_msg = upload_btn.upload(add_file, [chatbot, upload_btn], [chatbot], queue=False).then(
    #     chat_with_ai, chatbot, chatbot, t, m, dL, pd, pi, c
    # )
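

# Eval UI ("Claude Evals" tab): one question in, a streamed Markdown answer out.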
with gr.Blocks() as iface:
with gr.Row(variant="compact", equal_height=True):
with gr.Column(scale=1):
q: IOComponent = Textbox(
label="Question", placeholder="Enter a question here"
)
t: IOComponent = Textbox(
label="Token",
info="You'll get this token from Anthropic console and this is mandatory",
placeholder="Enter a token here",
type="password",
)
m: IOComponent = Dropdown(
choices=[model.value for model in ClaudeModels],
label="Model",
value=[model.value for model in ClaudeModels][0],
)
dL: IOComponent = Dropdown(
choices=[token.value for token in ModelTokenLength],
label="Token Length",
value=[token.value for token in ModelTokenLength][0],
)
pd: IOComponent = Dropdown(
choices=list(Prompts.__members__.keys()),
label="Prompt",
value=list(Prompts.__members__.keys())[0],
)
pi: IOComponent = Textbox(
label="Custom Prompt",
placeholder="Enter a custom prompt here",
lines=3,
value=Prompts[pd.value].value,
)
c: IOComponent = Checkbox(label="Memory", value=False)
btn: IOComponent = gr.Button("Send")
with gr.Column(scale=3):
mk: IOComponent = Markdown(" ")
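
    # Same preset-sync wiring as the chat tab.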
pd.change(
lambda choice: gr.update(value=Prompts[choice].value), inputs=pd, outputs=pi
)
btn.click(
interact_with_ai,
inputs=[q, t, m, dL, pd, pi, c],
outputs=mk,
)
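
# Assemble both tabs; queuing must stay enabled for generator (streaming) outputs.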
gd = gr.TabbedInterface(
[iface, cface], tab_names=["Claude Evals", "Claude Chat"], title="Claude Space"
)
gd.queue(concurrency_count=75, max_size=100).launch(
debug=True,
share=False,
server_name="0.0.0.0",
server_port=7860,
show_error=True,
show_tips=True,
)