Update app.py
app.py
CHANGED
@@ -358,7 +358,8 @@ GOOGLEAI_MODELS = {
 ANTHROPIC_MODELS = {
     "claude-3-7-sonnet-20250219": 128000,  # Claude 3.7 Sonnet
     "claude-3-5-sonnet-20241022": 200000,  # Claude 3.5 Sonnet
-    "claude-3-5-haiku-20240307": 200000,  # Claude 3.5 Haiku
+    "claude-3-5-haiku-20240307": 200000,  # Claude 3.5 Haiku
+    "claude-3-5-sonnet-20240620": 200000,  # Claude 3.5 Sonnet 2024-06-20
     "claude-3-opus-20240229": 200000,  # Claude 3 Opus
     "claude-3-haiku-20240307": 200000,  # Claude 3 Haiku
     "claude-3-sonnet-20240229": 200000,  # Claude 3 Sonnet
@@ -632,12 +633,9 @@ def get_model_info(provider, model_choice):
     elif provider == "Together":
         if model_choice in TOGETHER_MODELS:
             return model_choice, TOGETHER_MODELS[model_choice]
-    elif provider == "
-        if model_choice in
-            return model_choice,
-    elif provider == "Cerebras":
-        if model_choice in CEREBRAS_MODELS:
-            return model_choice, CEREBRAS_MODELS[model_choice]
+    elif provider == "Anthropic":
+        if model_choice in ANTHROPIC_MODELS:
+            return model_choice, ANTHROPIC_MODELS[model_choice]
     elif provider == "GoogleAI":
         if model_choice in GOOGLEAI_MODELS:
             return model_choice, GOOGLEAI_MODELS[model_choice]
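The restored "Anthropic" branch mirrors the existing Together and GoogleAI lookups: it returns the model id together with its context window from ANTHROPIC_MODELS. A self-contained sketch of that lookup path (the `lookup` helper is illustrative, not a function in app.py):

```python
# Illustrative sketch of the lookup the new branch performs; not app.py's actual call site.
ANTHROPIC_MODELS = {
    "claude-3-7-sonnet-20250219": 128000,
    "claude-3-5-sonnet-20241022": 200000,
}

def lookup(provider, model_choice):
    if provider == "Anthropic" and model_choice in ANTHROPIC_MODELS:
        return model_choice, ANTHROPIC_MODELS[model_choice]
    return None, None

print(lookup("Anthropic", "claude-3-7-sonnet-20250219"))  # ('claude-3-7-sonnet-20250219', 128000)
```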
@@ -707,7 +705,6 @@ def call_anthropic_api(payload, api_key_override=None):
     # Try to import Anthropic
     try:
         import anthropic
-        from anthropic import Anthropic
     except ImportError:
         raise ImportError("Anthropic package not installed. Install it with: pip install anthropic")

@@ -715,7 +712,7 @@ def call_anthropic_api(payload, api_key_override=None):
     if not api_key:
         raise ValueError("Anthropic API key is required")

-    client = Anthropic(api_key=api_key)
+    client = anthropic.Anthropic(api_key=api_key)

     # Extract parameters from payload
     model = payload.get("model", "claude-3-5-sonnet-20241022")
@@ -726,32 +723,30 @@ def call_anthropic_api(payload, api_key_override=None):
     # Format messages for Anthropic
     # Find system message if any
     system_prompt = None
-
+    anthropic_messages = []

     for msg in messages:
         if msg["role"] == "system":
             system_prompt = msg["content"]
         else:
-            #
+            # Handle multimodal content if needed
             if isinstance(msg["content"], list):
-                #
-
+                # For image handling
+                content_parts = []
                 for item in msg["content"]:
                     if item["type"] == "text":
-
-                        "type": "text",
+                        content_parts.append({
+                            "type": "text",
                             "text": item["text"]
                         })
                     elif item["type"] == "image_url":
-                        #
-
-                        if
-                            # Extract media type and base64 data
-
-
-
-
-                            anthropic_content.append({
+                        # Handle base64 images if needed
+                        img_url = item["image_url"]["url"]
+                        if img_url.startswith("data:image"):
+                            # Extract the media type and base64 data
+                            media_type = img_url.split(";")[0].split(":")[1]
+                            base64_data = img_url.split(",")[1]
+                            content_parts.append({
                                 "type": "image",
                                 "source": {
                                     "type": "base64",
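The new base64 branch splits the data URI by hand to recover the media type and payload. A self-contained sketch of that parsing, with a hypothetical helper name:

```python
def split_data_uri(img_url):
    """Split 'data:image/png;base64,<payload>' into (media_type, base64_data)."""
    media_type = img_url.split(";")[0].split(":")[1]  # e.g. 'image/png'
    base64_data = img_url.split(",")[1]               # the base64 payload
    return media_type, base64_data

print(split_data_uri("data:image/png;base64,iVBORw0KGgo="))  # ('image/png', 'iVBORw0KGgo=')
```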
@@ -759,20 +754,14 @@ def call_anthropic_api(payload, api_key_override=None):
                                     "data": base64_data
                                 }
                             })
-
-
-
-                                "type": "text",
-                                "text": f"[Image URL: {image_url}]"
-                            })
-            chat_messages.append({
-                "role": msg["role"],
-                "content": anthropic_content
+                anthropic_messages.append({
+                    "role": "user" if msg["role"] == "user" else "assistant",
+                    "content": content_parts
                 })
             else:
-                # Simple text
-
-                    "role": msg["role"],
+                # Simple text messages
+                anthropic_messages.append({
+                    "role": "user" if msg["role"] == "user" else "assistant",
                     "content": msg["content"]
                 })

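After this loop, each non-system turn ends up in anthropic_messages either as plain text or as a list of content blocks. An illustrative example of one multimodal user entry (the values are made up):

```python
# Illustrative shape of one entry appended by the loop above (not real data).
example_entry = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this picture?"},
        {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": "image/png",
                "data": "iVBORw0KGgo...",  # truncated base64 payload
            },
        },
    ],
}
```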
@@ -782,7 +771,7 @@ def call_anthropic_api(payload, api_key_override=None):
         max_tokens=max_tokens,
         temperature=temperature,
         system=system_prompt,
-        messages=
+        messages=anthropic_messages
     )

     return response
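For context, a minimal end-to-end sketch of the call this function builds, using the module-qualified client the diff switches to (requires `pip install anthropic`; the key and prompt are placeholders):

```python
import anthropic

client = anthropic.Anthropic(api_key="sk-ant-...")  # placeholder key
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=256,
    temperature=0.7,
    system="You are a helpful assistant.",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.content[0].text)  # text of the first content block
```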
@@ -2865,8 +2854,8 @@ def create_app():
         return None

     # Set up submission event
-    def submit_message(message,
-                       together_model, anthropic_model, googleai_model,
+    def submit_message(message, chatbot, provider_choice,
+                       openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, anthropic_model, googleai_model,
                        temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
                        top_k, min_p, seed, top_a, stream_output, response_format,
                        images, documents, reasoning_effort, system_message, transforms,
@@ -2874,41 +2863,41 @@ def create_app():

        """Submit message to selected provider and model"""
        # Get the currently selected model
-        model_choice = get_current_model(
+        model_choice = get_current_model(provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model,
                                          together_model, anthropic_model, googleai_model)

        # Check if model is selected
        if not model_choice:
-            error_message = f"Error: No model selected for provider {
-            return
+            error_message = f"Error: No model selected for provider {provider_choice}"
+            return chatbot + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": error_message}
            ]

        # Select the appropriate API key based on the provider
        api_key_override = None
-        if
+        if provider_choice == "OpenRouter" and openrouter_api_key:
            api_key_override = openrouter_api_key
-        elif
+        elif provider_choice == "OpenAI" and openai_api_key:
            api_key_override = openai_api_key
-        elif
+        elif provider_choice == "HuggingFace" and hf_api_key:
            api_key_override = hf_api_key
-        elif
+        elif provider_choice == "Groq" and groq_api_key:
            api_key_override = groq_api_key
-        elif
+        elif provider_choice == "Cohere" and cohere_api_key:
            api_key_override = cohere_api_key
-        elif
+        elif provider_choice == "Together" and together_api_key:
            api_key_override = together_api_key
-        elif
+        elif provider_choice == "Anthropic" and anthropic_api_key:
            api_key_override = anthropic_api_key
-        elif
+        elif provider_choice == "GoogleAI" and googleai_api_key:
            api_key_override = googleai_api_key

        # Call the ask_ai function with the appropriate parameters
        return ask_ai(
            message=message,
-            history=
-            provider=
+            history=chatbot,
+            provider=provider_choice,
            model_choice=model_choice,
            temperature=temperature,
            max_tokens=max_tokens,
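The elif chain above selects the override key for the chosen provider; a table-driven lookup is shown below purely as an illustration of the same provider-to-key selection (not what the commit does):

```python
# Hypothetical alternative to the elif chain: map provider names to key values, then look up.
def pick_api_key(provider_choice, keys):
    """keys maps provider name -> API key string (possibly empty)."""
    return keys.get(provider_choice) or None

print(pick_api_key("Anthropic", {"Anthropic": "sk-ant-...", "GoogleAI": ""}))  # 'sk-ant-...'
```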
@@ -2958,7 +2947,7 @@ def create_app():
            temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
            top_k, min_p, seed, top_a, stream_output, response_format,
            images, documents, reasoning_effort, system_message, transforms,
-            openrouter_api_key, openai_api_key, hf_api_key, groq_api_key, cohere_api_key, together_api_key, googleai_api_key
+            openrouter_api_key, openai_api_key, hf_api_key, groq_api_key, cohere_api_key, together_api_key, anthropic_api_key, googleai_api_key
        ],
        outputs=chatbot,
        show_progress="minimal",
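The updated inputs list now passes anthropic_api_key through to submit_message alongside the other provider keys. A stripped-down sketch of this Gradio event-wiring pattern (component and handler names here are illustrative placeholders, not the app's real ones):

```python
import gradio as gr

# Minimal sketch of the wiring pattern used above, with placeholder components.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    message = gr.Textbox(label="Message")
    anthropic_api_key = gr.Textbox(label="Anthropic API key", type="password")

    def submit_message(msg, history, key):
        # Placeholder handler; the real app forwards all inputs to ask_ai().
        reply = f"(echo) {msg}"
        return history + [{"role": "user", "content": msg},
                          {"role": "assistant", "content": reply}]

    message.submit(
        submit_message,
        inputs=[message, chatbot, anthropic_api_key],
        outputs=chatbot,
        show_progress="minimal",
    )
```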