# api_key_manager.py
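"""API key management helpers for the Devs Do Code API.

Validates API keys stored in the Firestore 'Users' collection, tracks each
user's remaining request credits, reports subscription status, and exposes
the catalogue of models available through the API.
"""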

import os

import firebase_admin
from dotenv import load_dotenv
from firebase_admin import credentials, firestore

# Load environment variables from a local .env file before they are read below.
load_dotenv()

# Initialize Firebase Admin SDK
cred = credentials.Certificate({
    "type": "service_account",
    "project_id": os.environ.get("FIREBASE_PROJECT_ID"),
    "private_key_id": os.environ.get("FIREBASE_PRIVATE_KEY_ID"),
    "private_key": os.environ.get("FIREBASE_PRIVATE_KEY").replace("\\n", "\n"),
    "client_email": os.environ.get("FIREBASE_CLIENT_EMAIL"),
    "client_id": os.environ.get("FIREBASE_CLIENT_ID"),
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": os.environ.get("FIREBASE_CLIENT_X509_CERT_URL")
})

# Initialize the default app only once; get_app() raises ValueError when no
# default app exists yet.
try:
    firebase_admin.get_app()
except ValueError:
    firebase_admin.initialize_app(cred)

db = firestore.client()

def get_user_info(api_key):
    """Fetch the user fields needed for key validation, or None if the key is unknown."""
    doc = db.collection('Users').document(api_key).get(field_paths=[
        'subscription_plan', 'requests_remaining', 'paid_models', 'ai_engine_secret_key'
    ])
    if doc.exists:
        return doc.to_dict()
    return None

def check_api_key_validity(api_key, check_rate_limit=True):
    """Return (is_valid, message); message is empty when the key is valid."""
    user_data = get_user_info(api_key)
    if not user_data:
        return False, "Invalid API key"

    if check_rate_limit:
        if user_data.get('requests_remaining', 0) <= 0:
            return False, "Rate limit exceeded. Please upgrade to Premium to continue using the Devs Do Code API"
    return True, ""

def update_request_count(api_key, credits_used):
    """Deduct credits_used from the user's remaining request balance."""
    user_ref = db.collection('Users').document(api_key)
    user_doc = user_ref.get(field_paths=['requests_remaining'])
    if user_doc.exists:
        requests_remaining = user_doc.to_dict().get('requests_remaining', 0)
        new_requests_remaining = requests_remaining - credits_used  # Subtract credits used
        user_ref.update({'requests_remaining': new_requests_remaining})
        return True
    return False
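
# Note: update_request_count reads the balance and then writes it back, so two
# concurrent requests can race and lose a deduction. A minimal sketch of an
# atomic variant, assuming the google-cloud-firestore client re-exported by
# firebase_admin exposes firestore.Increment (the function name below is
# illustrative, not part of the existing API):
def update_request_count_atomic(api_key, credits_used):
    """Atomically deduct credits_used with a server-side increment (sketch)."""
    from google.api_core.exceptions import NotFound  # shipped with google-cloud-firestore
    user_ref = db.collection('Users').document(api_key)
    try:
        # Increment(-n) applies the decrement on the server, avoiding the race.
        user_ref.update({'requests_remaining': firestore.Increment(-credits_used)})
        return True
    except NotFound:
        # update() fails when the document (i.e. the API key) does not exist.
        return False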

def get_rate_limit_status(api_key):
    """Return the user's remaining request count, or an error for an unknown key."""
    user_doc = db.collection('Users').document(api_key).get(field_paths=['requests_remaining'])
    if not user_doc.exists:
        return {"error": "Invalid API key"}

    requests_remaining = user_doc.to_dict().get('requests_remaining', 0)
    return {"requests_remaining": requests_remaining}

def get_subscription_status(api_key):
    """Return the user's subscription plan (default 'Free'), or an error for an unknown key."""
    user_doc = db.collection('Users').document(api_key).get(field_paths=['subscription_plan'])
    if not user_doc.exists:
        return {"error": "Invalid API key"}

    subscription_plan = user_doc.to_dict().get('subscription_plan', 'Free')
    return {"subscription_plan": subscription_plan}

def get_available_models():
    """Return the catalogue of models exposed by the API, keyed by model id."""
    models = {
        "gpt-4o": {
            "name": "gpt-4o-2024-08-06",
            "description": "Our high-intelligence flagship model for complex, multi-step tasks. GPT-4o is multimodal, processing text, audio, and images, and is faster and 50% cheaper than its predecessor. It has a maximum output of 16,384 tokens, if o1-mini is used",
            "max_tokens": 4096,
            "context_window": 128000,
            "training_data": "Up to October 2023"
            },
        "o1-preview": {
            "name": "o1-preview-2024-09-12",
            "description": "OpenAI o1 is a new large language model optimized for complex reasoning tasks, utilizing reinforcement learning to enhance its problem-solving capabilities. It excels in STEM subjects and demonstrates impressive performance on challenging benchmarks, including achieving 83% accuracy on the AIME mathematics competition, significantly outperforming previous models like GPT-4o.",
            "max_tokens": 32768,
            "context_window": 128000,
            "training_data": "Up to September 2024"
            },
        "claude-3.5-sonnet": {
            "name": "claude-3.5-sonnet-latest",
            "description": "Claude 3.5 Sonnet is a highly capable model developed by Anthropic, optimized for complex reasoning tasks and demonstrating significant advancements in performance compared to its predecessors. It excels in areas such as coding, multi-step workflows, and interpreting visual data.",
            "context_window": 8192,
            "max_tokens": 200000,
            "training_data": "Up to April 2024"
            },
        "gemini-1.5-pro": {
            "name": "Gemini-1.5-Pro",
            "description": "Gemini is an AI chatbot developed by Google. Formerly known as Bard, it was released to the public in 2023. Gemini-1.5-Pro is an improved version of the model, offering more accurate and informative responses.",
            "max_tokens": 4096,
            "context_window": 131072,
            "training_data": "Up to 2023"
            },
        "gemini-1-5-flash": {
            "name": "Gemini 1.5 Flash",
            "description": "A lighter-weight model than 1.5 Pro, designed to be fast and efficient to serve at scale. It's optimized for high-volume, high-frequency tasks and features a breakthrough long context window, making it highly capable of multimodal reasoning across vast amounts of information.",
            "max_tokens": 4096,
            "context_window": 131072, 
            "training_data": "Up to 2024"
            },
        "o1-mini": {
            "name": "o1-mini",
            "description": "The o1-mini model is a cost-effective and efficient AI model designed for quick reasoning tasks, particularly in STEM fields such as coding and mathematics. It is optimized for speed and lower resource consumption, making it an ideal choice for applications requiring fast responses without the need for extensive world knowledge.",
            "max_tokens": 65536,
            "context_window": 128000,
            "training_data": "Up to October 2023"
        },
        "meta-llama-405b-turbo": {
            "name": "Meta-Llama-3.1-405B-Instruct-Turbo",
            "description": "Meta-Llama-3.1-405B-Instruct-Turbo is a state-of-the-art AI model designed for instruction-based tasks. It excels in generating coherent and contextually relevant responses, making it suitable for applications in conversational AI, content creation, and more. This model leverages advanced techniques in natural language processing to provide high-quality outputs.",
            "max_tokens": 4096,
            "context_window": 128000,
            "training_data": "Up to October 2023"
        }
    }
    return models

def get_model_info(model_name):
    """Return metadata for a single model key, or None if the key is unknown."""
    models = get_available_models()
    return models.get(model_name)

if __name__ == "__main__":
    # The dictionary key is "gpt-4o"; "gpt4o" would return None.
    print(get_model_info("gpt-4o"))