import json
import base64
from datetime import datetime
from typing import Optional

import jwt  # PyJWT; provides the ExpiredSignatureError / InvalidTokenError types caught below

class JWTAnalyzer:
    def analyze(self, AIModels, token, openai_api_token: Optional[str], bard_api_token: Optional[str], llama_api_token: Optional[str], llama_endpoint: Optional[str], AI: str):
        """Decode a JWT without verifying its signature and hand the analysis to the selected AI backend."""
        response = ""  # initialised up front so the method can still return if decoding fails
        try:
            parts = token.split('.')
            if len(parts) != 3:
                raise ValueError("Invalid token format. Expected 3 parts.")
            header = json.loads(base64.urlsafe_b64decode(parts[0] + '===').decode('utf-8', 'replace'))
            payload = json.loads(base64.urlsafe_b64decode(parts[1] + '===').decode('utf-8', 'replace'))
            algorithm_used = header.get('alg', 'Unknown Algorithm')
            expiration_time = datetime.utcfromtimestamp(payload['exp']) if 'exp' in payload else None

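            # Summary of the decoded token that is handed to the AI backend for interpretation.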
            analysis_result = {
                'Algorithm Used': algorithm_used,
                'Decoded Payload': payload,
                'Claims': json.dumps(payload, indent=4),
                'Expires At': expiration_time.strftime('%Y-%m-%d %H:%M:%S') if expiration_time else 'No Expiration Claim'
            }
            
            response = ""
            match AI:
                case 'openai':
                    if openai_api_token is None:
                        raise ValueError("KeyNotFound: Key Not Provided")
                    response = AIModels.gpt_ai(openai_api_token, json.dumps(analysis_result))
                case 'bard':
                    if bard_api_token is None:
                        raise ValueError("KeyNotFound: Key Not Provided")
                    response = AIModels.BardAI(bard_api_token, json.dumps(analysis_result))
                case 'llama':
                    response = AIModels.llama_AI(json.dumps(analysis_result), "local", llama_api_token, llama_endpoint)
                case 'llama-api':
                    response = AIModels.llama_AI(json.dumps(analysis_result), "runpod", llama_api_token, llama_endpoint)
        except jwt.ExpiredSignatureError:
            response = json.dumps({'Error': 'Token has expired.'})
        except jwt.InvalidTokenError as e:
            response = json.dumps({'Error': f'Invalid token: {e}'})
        return response
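
# A minimal, self-contained sketch of how analyze() might be invoked. The _StubAIModels class
# and the sample token below are illustrative stand-ins (not part of this project) so the
# example runs without any real API keys or network access.
if __name__ == "__main__":
    class _StubAIModels:
        """Hypothetical stand-in exposing the backend methods referenced by JWTAnalyzer."""

        def gpt_ai(self, api_token, data):
            return f"[stub GPT analysis] {data}"

        def BardAI(self, api_token, data):
            return f"[stub Bard analysis] {data}"

        def llama_AI(self, data, mode, api_token, endpoint):
            return f"[stub Llama ({mode}) analysis] {data}"

    # Build an unsigned sample token: header {"alg": "HS256", "typ": "JWT"}, payload {"sub": "demo"}.
    def _b64url(obj):
        return base64.urlsafe_b64encode(json.dumps(obj).encode()).rstrip(b"=").decode()

    sample_token = ".".join([
        _b64url({"alg": "HS256", "typ": "JWT"}),
        _b64url({"sub": "demo"}),
        "signature",
    ])

    analyzer = JWTAnalyzer()
    print(analyzer.analyze(_StubAIModels(), sample_token, "dummy-openai-key", None, None, None, AI="openai"))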