neuralleap committed
Commit eff1878 · verified · 1 Parent(s): feaa77d

Update app.py

Files changed (1)
  1. app.py +74 -51
app.py CHANGED
@@ -42,76 +42,99 @@ client = None
  if openai_api_key:
      client = OpenAI(api_key=openai_api_key)

+ # Available models with descriptions and token limits
+ # AVAILABLE_MODELS = {
+ # # "gpt-3.5-turbo": {
+ # # "description": "Fast and cost-effective",
+ # # "max_tokens": 4096,
+ # # "output_tokens": 500,
+ # # "temperature": 0.7
+ # # },
+ # # "gpt-3.5-turbo-16k": {
+ # # "description": "Longer context window",
+ # # "max_tokens": 16384,
+ # # "output_tokens": 1000,
+ # # "temperature": 0.7
+ # # },
+ # # "gpt-4": {
+ # # "description": "More capable but slower",
+ # # "max_tokens": 8192,
+ # # "output_tokens": 800,
+ # # "temperature": 0.7
+ # # },
+ # # "gpt-4-turbo": {
+ # # "description": "Most powerful model (if available)",
+ # # "max_tokens": 128000,
+ # # "output_tokens": 1200,
+ # # "temperature": 0.7
+ # # },
+ # "gpt-4o": {
+ # "description": "Latest GPT-4 Omni model",
+ # "max_tokens": 128000,
+ # "output_tokens": 1200,
+ # "temperature": 0.7
+ # },
+ # # "gpt-4o-mini": {
+ # # "description": "Efficient version of GPT-4o",
+ # # "max_tokens": 128000,
+ # # "output_tokens": 1000,
+ # # "temperature": 0.7
+ # # },
+ # # "o1-mini": {
+ # # "description": "OpenAI Reasoning Model - Mini",
+ # # "max_tokens": 180000,
+ # # "output_tokens": 1000,
+ # # "temperature": 0.7
+ # # },
+ # "o1": {
+ # "description": "OpenAI Reasoning Model - Standard",
+ # "max_tokens": 200000,
+ # "output_tokens": 1200,
+ # "temperature": 0.7
+ # },
+ # # "o1-pro": {
+ # # "description": "OpenAI Reasoning Model - Professional",
+ # # "max_tokens": 200000,
+ # # "output_tokens": 1500,
+ # # "temperature": 0.7
+ # # },
+ # "o3-mini": {
+ # "description": "OpenAI Advanced Reasoning - Mini",
+ # "max_tokens": 200000,
+ # "output_tokens": 1000,
+ # "temperature": 0.7
+ # }
+ # # "o3-mini-2025-01-31": {
+ # # "description": "OpenAI Advanced Reasoning - Enhanced",
+ # # "max_tokens": 200000,
+ # # "output_tokens": 1200,
+ # # "temperature": 0.7
+ # # }
+ # }
+
  # Available models with descriptions and token limits
  AVAILABLE_MODELS = {
- # "gpt-3.5-turbo": {
- # "description": "Fast and cost-effective",
- # "max_tokens": 4096,
- # "output_tokens": 500,
- # "temperature": 0.7
- # },
- # "gpt-3.5-turbo-16k": {
- # "description": "Longer context window",
- # "max_tokens": 16384,
- # "output_tokens": 1000,
- # "temperature": 0.7
- # },
- # "gpt-4": {
- # "description": "More capable but slower",
- # "max_tokens": 8192,
- # "output_tokens": 800,
- # "temperature": 0.7
- # },
- # "gpt-4-turbo": {
- # "description": "Most powerful model (if available)",
- # "max_tokens": 128000,
- # "output_tokens": 1200,
- # "temperature": 0.7
- # },
      "gpt-4o": {
          "description": "Latest GPT-4 Omni model",
          "max_tokens": 128000,
          "output_tokens": 1200,
-         "temperature": 0.7
+         "temperature": None  # Does not support temperature
      },
- # "gpt-4o-mini": {
- # "description": "Efficient version of GPT-4o",
- # "max_tokens": 128000,
- # "output_tokens": 1000,
- # "temperature": 0.7
- # },
- # "o1-mini": {
- # "description": "OpenAI Reasoning Model - Mini",
- # "max_tokens": 180000,
- # "output_tokens": 1000,
- # "temperature": 0.7
- # },
      "o1": {
          "description": "OpenAI Reasoning Model - Standard",
          "max_tokens": 200000,
          "output_tokens": 1200,
-         "temperature": 0.7
+         "temperature": None  # Does not support temperature
      },
- # "o1-pro": {
- # "description": "OpenAI Reasoning Model - Professional",
- # "max_tokens": 200000,
- # "output_tokens": 1500,
- # "temperature": 0.7
- # },
      "o3-mini": {
          "description": "OpenAI Advanced Reasoning - Mini",
          "max_tokens": 200000,
          "output_tokens": 1000,
-         "temperature": 0.7
+         "temperature": None  # Does not support temperature
      }
- # "o3-mini-2025-01-31": {
- # "description": "OpenAI Advanced Reasoning - Enhanced",
- # "max_tokens": 200000,
- # "output_tokens": 1200,
- # "temperature": 0.7
- # }
  }

+
  # Function to call OpenAI API
  def get_ai_response(prompt, history):
      if not client:
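
The effect of this commit is that "temperature" becomes an explicit None sentinel for the three active models instead of a hard-coded 0.7, so request-building code can skip the parameter for models that reject it. The diff above truncates get_ai_response after the client check, so the following is only a minimal sketch of that pattern; the build_request_kwargs helper and the trimmed AVAILABLE_MODELS copy are illustrative and not part of the committed file.

# Illustrative sketch only; not part of the commit.
from openai import OpenAI

AVAILABLE_MODELS = {  # trimmed copy of the committed config, for the example
    "gpt-4o": {"output_tokens": 1200, "temperature": None},
    "o1": {"output_tokens": 1200, "temperature": None},
    "o3-mini": {"output_tokens": 1000, "temperature": None},
}

def build_request_kwargs(model_name, messages):
    """Assemble chat.completions kwargs, omitting temperature when it is None."""
    config = AVAILABLE_MODELS[model_name]
    kwargs = {"model": model_name, "messages": messages}
    if config["temperature"] is not None:  # only send temperature where supported
        kwargs["temperature"] = config["temperature"]
    return kwargs

# Usage (requires OPENAI_API_KEY in the environment):
# client = OpenAI()
# response = client.chat.completions.create(
#     **build_request_kwargs("gpt-4o", [{"role": "user", "content": "Hello"}])
# )
# print(response.choices[0].message.content)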