Update app.py
app.py CHANGED
@@ -239,7 +239,7 @@ def install_custom_packages(package_list):
 
 @st.cache_resource(ttl=3600)
 def init_ai_models(model_name=None):
-    try:
+    try:
         # Get the GitHub token from secrets
         github_token = get_secret("github_token_api")
         if not github_token:
@@ -251,23 +251,46 @@ def init_ai_models(model_name=None):
             model_name = "gpt-4o"
 
         with st.spinner(f"Initializing AI model: {model_name}..."):
-
-
-
-
-
-
-
-
-
-            credential
-
-
-
-
-
-
-
+            try:
+                # Import Azure modules here to prevent import errors
+                from azure.ai.inference import ChatCompletionsClient
+                from azure.ai.inference.models import UserMessage
+                from azure.core.credentials import AzureKeyCredential
+
+                # Setup Azure Inference client with proper endpoint
+                endpoint = "https://models.inference.ai.azure.com"
+
+                # Create the client with proper credential
+                client = ChatCompletionsClient(
+                    endpoint=endpoint,
+                    credential=AzureKeyCredential(github_token),
+                )
+
+                # Test the client with a simple request to verify it works
+                try:
+                    test_response = client.complete(
+                        messages=[UserMessage("Hello")],
+                        max_tokens=10,
+                        model=model_name
+                    )
+                    if not test_response or not test_response.choices:
+                        st.error(f"API connection test failed for model {model_name}")
+                        return None
+                except Exception as api_err:
+                    st.error(f"API test failed: {str(api_err)}")
+                    logger.error(f"API test error: {str(api_err)}")
+                    return None
+
+                logger.info(f"Successfully initialized AI model: {model_name}")
+                return {
+                    "client": client,
+                    "model_name": model_name,
+                    "last_loaded": datetime.now().isoformat()
+                }
+            except ImportError as ie:
+                st.error(f"Azure AI modules not available: {str(ie)}")
+                logger.error(f"Import error: {str(ie)}")
+                return None
     except Exception as e:
         st.error(f"Failed to load AI models: {str(e)}")
         logger.error(f"AI model initialization error: {str(e)}")
@@ -279,6 +302,8 @@ def suggest_code_completion(code_snippet, models):
         return None
 
     try:
+        from azure.ai.inference.models import UserMessage
+
         prompt = f"""Write a complete Manim animation scene based on this code or idea:
 {code_snippet}
 
@@ -291,8 +316,6 @@ The code should be a complete, working Manim animation that includes:
 Here's the complete Manim code:
 """
         with st.spinner("AI is generating your animation code..."):
-            from azure.ai.inference.models import UserMessage
-
             # Generate the response using Azure Inference API
             response = models["client"].complete(
                 messages=[