import re

import anthropic
import gradio as gr
from gradio.components import Checkbox, Dropdown, IOComponent, Textbox

from ai import AnthropicCustom
from const import ClaudeModels, ModelTokenLength, Prompts
# Define a global variable for the conversation history
conversation_history = ""
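
# Handler for the single-turn interface: formats the selected prompt template,
# optionally injecting the accumulated conversation history, and streams Claude's reply.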
async def interact_with_ai(user_question, token, model, token_length, prompt, prompt_input, memory):
    global conversation_history
    if memory:
        prompt = Prompts[prompt].value.format(memory=conversation_history, question=user_question)
    else:
        prompt = Prompts[prompt].value.format(memory="", question=user_question)
    # If the user edited the custom-prompt box, splice their "Human:" line back into the template
    match = re.search(r'Human: (.*?) \n\nConversations:', prompt)
    if match and prompt_input != match.group(1):
        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)
    # Create an instance of the custom class
    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt=prompt)
    # Create a generator to stream the response, yielding the accumulated text so far
    response_accumulated = ""
    async for response in anth.get_anthropic_response_async():
        response_accumulated += response
        yield response_accumulated
    # Record the completed exchange in the global conversation history
    conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
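
# Handler for the chat tab: rebuilds the conversation history from Gradio's chat
# history when memory is enabled, then streams Claude's reply to the new message.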
async def chat_with_ai(message, history, token, model, token_length, prompt, prompt_input, memory):
    global conversation_history
    if memory:
        # Rebuild the running history string from Gradio's (user, assistant) message pairs
        for conversation in history:
            user_question, response_accumulated = conversation
            conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
        prompt = Prompts[prompt].value.format(memory=conversation_history, question=message)
    else:
        prompt = Prompts[prompt].value.format(memory="", question=message)
    # If the user edited the custom-prompt box, splice their "Human:" line back into the template
    match = re.search(r'Human: (.*?) \n\nConversations:', prompt)
    if match and prompt_input != match.group(1):
        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)
    # Create an instance of the custom class
    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt=prompt)
    # Create a generator to stream the response, yielding the accumulated text so far
    response_accumulated = ""
    async for response in anth.get_anthropic_response_async():
        response_accumulated += response
        yield response_accumulated
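
# Input components for the single-turn interface. The custom-prompt textbox is
# pre-filled with the "Human:" line extracted from the selected prompt template.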
promptDropdown: IOComponent = Dropdown(choices=list(Prompts.__members__.keys()), label="Prompt", value=list(Prompts.__members__.keys())[0])
prompt_input: IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1))
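
# Single-turn question/answer interface with automatic flagging of responses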
iface = gr.Interface(
    fn=interact_with_ai,
    flagging_options=["Inappropriate", "Disrespectful", "Spam"],
    allow_flagging='auto',
    title="Claude Space",
    inputs=[
        Textbox(label="Question", placeholder="Enter a question here"),
        Textbox(label="Token", placeholder="Enter a token here", type='password'),
        Dropdown(choices=[model.value for model in ClaudeModels], label="Model", value=[model.value for model in ClaudeModels][0]),
        Dropdown(choices=[token.value for token in ModelTokenLength], label="Token Length", value=[token.value for token in ModelTokenLength][0]),
        promptDropdown,
        prompt_input,
        Checkbox(label="Memory", value=False),
    ],
    outputs="markdown",
    cache_examples=True,
)
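
# Recreate the prompt components so the chat interface gets its own instances
# rather than sharing component objects with iface.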
promptDropdown: IOComponent = Dropdown(choices=list(Prompts.__members__.keys()), label="Prompt", value=list(Prompts.__members__.keys())[0])
prompt_input: IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1))
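
# Streaming chat interface; memory defaults to on so prior turns are carried into the prompt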
cface = gr.ChatInterface(
    fn=chat_with_ai,
    additional_inputs=[
        Textbox(label="Token", placeholder="Enter a token here", type='password'),
        Dropdown(choices=[model.value for model in ClaudeModels], label="Model", value=[model.value for model in ClaudeModels][0]),
        Dropdown(choices=[token.value for token in ModelTokenLength], label="Token Length", value=[token.value for token in ModelTokenLength][0]),
        promptDropdown,
        prompt_input,
        Checkbox(label="Memory", value=True),
    ],
)
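
# Combine both interfaces into tabs and launch with request queuing enabled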
if __name__ == "__main__":
    gd = gr.TabbedInterface([iface, cface], tab_names=["Claude Space", "Claude Chat"], title="Claude Space")
    gd.queue(concurrency_count=75, max_size=100).launch(debug=True, share=False, server_name='0.0.0.0', server_port=7864)