shukdevdattaEX committed
Commit d10e8ed · verified · 1 Parent(s): 493d21f

Update app.py

Files changed (1):
  1. app.py +29 -630

app.py CHANGED
@@ -1,643 +1,42 @@
- import gradio as gr
- import json
  import os
- import time
- from datetime import datetime
- from typing import List, Dict, Any, Optional, Tuple
  import tempfile
- import base64
- from pathlib import Path

- # Core dependencies
- try:
-     from together import Together
-     import speech_recognition as sr
-     import io
-     import subprocess
-     import sys
- except ImportError as e:
-     print(f"Missing dependency: {e}")
-     print("Install with: pip install together speechrecognition pyaudio")
-     sys.exit(1)

- class ConversationMemory:
-     """Manages conversation context and memory across sessions"""
-
-     def __init__(self):
-         self.conversations = []
-         self.context_graph = {}
-         self.session_data = {}
-
-     def add_interaction(self, input_type: str, content: str, response: str, metadata: Dict = None):
-         interaction = {
-             "timestamp": datetime.now().isoformat(),
-             "input_type": input_type,
-             "content": content[:500] + "..." if len(content) > 500 else content,
-             "response": response[:1000] + "..." if len(response) > 1000 else response,
-             "metadata": metadata or {}
-         }
-         self.conversations.append(interaction)
-
-     def clear_history(self):
-         """Clear conversation history"""
-         self.conversations = []
-         self.context_graph = {}
-         self.session_data = {}
-
-     def get_relevant_context(self, query: str, limit: int = 3) -> List[Dict]:
-         relevant = []
-         query_lower = query.lower()
-
-         for conv in reversed(self.conversations[-10:]):
-             score = 0
-             content_lower = conv["content"].lower()
-             response_lower = conv["response"].lower()
-
-             for word in query_lower.split():
-                 if len(word) > 3:
-                     if word in content_lower or word in response_lower:
-                         score += 1
-
-             if score > 0:
-                 relevant.append((score, conv))
-
-         relevant.sort(key=lambda x: x[0], reverse=True)
-         return [conv for score, conv in relevant[:limit]]

- class NexusAI:
-     """Main AI processing class"""
-
-     def __init__(self, api_key: str = None):
-         self.api_key = api_key
-         self.client = None
-         self.memory = ConversationMemory()
-
-         if api_key:
-             self.initialize_client(api_key)
-
-     def initialize_client(self, api_key: str):
-         """Initialize Together AI client"""
-         try:
-             self.client = Together(api_key=api_key)
-             self.api_key = api_key
-             return True, "✅ API key initialized successfully!"
-         except Exception as e:
-             return False, f"❌ Failed to initialize API key: {str(e)}"
-
-     def transcribe_audio(self, audio_path: str) -> str:
-         """Transcribe audio to text"""
-         try:
-             r = sr.Recognizer()
-             with sr.AudioFile(audio_path) as source:
-                 audio_data = r.record(source)
-             text = r.recognize_google(audio_data)
-             return text
-         except Exception as e:
-             return f"Error transcribing audio: {str(e)}"
-
-     def execute_code(self, code: str, language: str = "python") -> str:
-         """Execute code safely (basic implementation)"""
-         try:
-             if language.lower() == "python":
-                 with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-                     f.write(code)
-                     temp_file = f.name
-
-                 try:
-                     result = subprocess.run([sys.executable, temp_file],
-                                             capture_output=True, text=True, timeout=10)
-                     output = result.stdout
-                     if result.stderr:
-                         output += f"\nErrors:\n{result.stderr}"
-                     return output
-                 except subprocess.TimeoutExpired:
-                     return "Code execution timed out (10s limit)"
-                 finally:
-                     os.unlink(temp_file)
-             else:
-                 return f"Language '{language}' not supported yet. Only Python is available."
-         except Exception as e:
-             return f"Error executing code: {str(e)}"
-
-     def build_context_messages(self, user_input: str, input_type: str, extracted_content: str = "") -> List[Dict]:
-         """Build context messages for the AI model"""
-         messages = []
-
-         system_msg = """You are Nexus AI, a creative multimodal assistant that helps users across different types of content.
-         You excel at connecting insights across text, voice, and code. Always provide helpful,
-         contextual responses that build on previous interactions when relevant."""
-
-         messages.append({"role": "system", "content": system_msg})
-
-         relevant_context = self.memory.get_relevant_context(user_input)
-         for context in relevant_context:
-             messages.append({
-                 "role": "assistant",
-                 "content": f"[Previous {context['input_type']} interaction] {context['response'][:200]}..."
-             })
-
-         current_content = f"Input Type: {input_type}\n\n"
-
-         if extracted_content:
-             current_content += f"Extracted Content:\n{extracted_content[:2000]}...\n\n" if len(extracted_content) > 2000 else f"Extracted Content:\n{extracted_content}\n\n"
-
-         current_content += f"User Query: {user_input}"
-
-         messages.append({"role": "user", "content": current_content})
-
-         return messages
-
-     def generate_response(self, user_input: str, input_type: str, extracted_content: str = "") -> str:
-         """Generate AI response using AFM-4.5B model"""
-         if not self.client:
-             return "❌ Please initialize your Together AI API key first!"
-
-         try:
-             messages = self.build_context_messages(user_input, input_type, extracted_content)
-
-             response = self.client.chat.completions.create(
-                 model="arcee-ai/AFM-4.5B-Preview",
-                 messages=messages,
-                 max_tokens=1024,
-                 temperature=0.7
-             )
-
-             ai_response = response.choices[0].message.content
-
-             self.memory.add_interaction(
-                 input_type=input_type,
-                 content=user_input + ("\n" + extracted_content if extracted_content else ""),
-                 response=ai_response
-             )
-
-             return ai_response
-
-         except Exception as e:
-             return f"❌ Error generating response: {str(e)}"

- nexus_ai = NexusAI()

- def initialize_api_key(api_key: str) -> Tuple[str, str]:
-     """Initialize the API key"""
-     if not api_key.strip():
-         return "❌ Please enter a valid API key", "error"
-
-     success, message = nexus_ai.initialize_client(api_key.strip())
-     status = "success" if success else "error"
-     return message, status

- def process_text_input(user_input: str, api_key_status: str) -> str:
-     """Process text input"""
-     if api_key_status != "success":
-         return "❌ Please initialize your Together AI API key first!"
-
-     if not user_input.strip():
-         return "Please enter some text to get started!"
-
-     return nexus_ai.generate_response(user_input, "text")

- def process_audio_input(audio_file, user_question: str, api_key_status: str) -> str:
-     """Process audio input with question"""
-     if api_key_status != "success":
-         return "❌ Please initialize your Together AI API key first!"
-
-     if audio_file is None:
-         return "Please upload an audio file first!"
-
-     transcribed_text = nexus_ai.transcribe_audio(audio_file)
-
-     if user_question.strip():
-         combined_input = f"Transcribed audio: '{transcribed_text}'\n\nUser question: {user_question}"
-         return nexus_ai.generate_response(combined_input, "audio", transcribed_text)
-     else:
-         return nexus_ai.generate_response("Please help me with this audio content", "audio", transcribed_text)

- def process_code_input(code_input: str, language: str, action: str, api_key_status: str) -> str:
-     """Process code input"""
-     if api_key_status != "success":
-         return "❌ Please initialize your Together AI API key first!"
-
-     if not code_input.strip():
-         return "Please enter some code first!"
-
-     result = ""
-
-     if action == "Execute Code":
-         execution_result = nexus_ai.execute_code(code_input, language)
-         result = f"**Code Execution Result:**\n```\n{execution_result}\n```\n\n"
-
-     ai_response = nexus_ai.generate_response(
-         f"Please analyze this {language} code and provide insights:\n\n{code_input}",
-         "code",
-         result
-     )
-
-     return result + ai_response

- def show_conversation_history() -> str:
-     """Show recent conversation history"""
-     if not nexus_ai.memory.conversations:
-         return "No conversation history yet. Start chatting to build your knowledge base!"
-
-     history = "## 📚 Recent Conversation History\n\n"
-     for i, conv in enumerate(nexus_ai.memory.conversations[-10:], 1):
-         timestamp = datetime.fromisoformat(conv["timestamp"]).strftime("%H:%M:%S")
-         history += f"**{i}. [{conv['input_type'].upper()}] {timestamp}**\n"
-         history += f"Input: {conv['content'][:100]}{'...' if len(conv['content']) > 100 else ''}\n"
-         history += f"Response: {conv['response'][:150]}{'...' if len(conv['response']) > 150 else ''}\n\n"
-
-     return history
-
- def clear_conversation_history() -> str:
-     """Clear conversation history"""
-     nexus_ai.memory.clear_history()
-     return "✅ Conversation history has been cleared!"
-
- def clear_text_inputs():
-     """Clear text input and output"""
-     return "", ""
-
- def clear_audio_inputs():
-     """Clear audio input and output"""
-     return None, "", ""
-
- def clear_code_inputs():
-     """Clear code input and output"""
-     return "", "", "python", "Execute Code"
-
- def create_nexus_interface():
-     with gr.Blocks(
-         theme=gr.themes.Soft(),
-         title="Nexus AI Assistant",
-         css="""
-         .gradio-container {
-             max-width: 1400px !important;
-             margin: 0 auto !important;
-             padding: 20px !important;
-         }
-
-         .api-key-section {
-             background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
-             border-radius: 12px;
-             padding: 20px;
-             margin-bottom: 25px;
-             box-shadow: 0 4px 6px rgba(0, 0, 0, 0.07);
-             border: 1px solid #e1e8ed;
-         }
-
-         .primary-button {
-             background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
-             border: none !important;
-             color: white !important;
-             font-weight: 600 !important;
-             border-radius: 8px !important;
-             padding: 12px 24px !important;
-             transition: all 0.3s ease !important;
-             box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important;
-             margin: auto;
-         }
-
-         .primary-button:hover {
-             transform: translateY(-2px) !important;
-             box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2) !important;
-         }
-         .primary-button2 {
-             background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
-             border: none !important;
-             color: white !important;
-             font-weight: 600 !important;
-             border-radius: 8px !important;
-             padding: 12px 24px !important;
-             transition: all 0.3s ease !important;
-             box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important;
-             margin: auto;
-             margin-left: 10px;
-             margin-right: 10px;
-         }
-
-         .primary-button2:hover {
-             transform: translateY(-2px) !important;
-             box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2) !important;
-         }
-
-         .secondary-button {
-             background: linear-gradient(135deg, #74b9ff 0%, #0984e3 100%) !important;
-             border: none !important;
-             color: white !important;
-             font-weight: 500 !important;
-             border-radius: 8px !important;
-             transition: all 0.3s ease !important;
-         }
-
-         .danger-button {
-             background: linear-gradient(135deg, #fd79a8 0%, #e84393 100%) !important;
-             border: none !important;
-             color: white !important;
-             font-weight: 500 !important;
-             border-radius: 8px !important;
-             transition: all 0.3s ease !important;
-         }
-
-         .tab-nav button {
-             border-radius: 8px 8px 0 0 !important;
-             font-weight: 500 !important;
-             padding: 12px 20px !important;
-         }
-
-         .scrollable-textarea textarea {
-             overflow-y: auto !important;
-             resize: vertical !important;
-         }
-
-         .input-card {
-             background: #ffffff;
-             border-radius: 10px;
-             padding: 20px;
-             box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
-             border: 1px solid #f0f0f0;
-             margin-bottom: 15px;
-         }
-
-         .output-card {
-             background: #f8f9fc;
-             border-radius: 10px;
-             padding: 20px;
-             box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.02);
-             border: 1px solid #e9ecef;
-         }
-
-         .header-gradient {
-             background: linear-gradient(135deg, #667eea 0%, #764ba2 50%, #667eea 100%);
-             background-size: 200% 200%;
-             animation: gradientShift 4s ease infinite;
-         }
-
-         @keyframes gradientShift {
-             0% { background-position: 0% 50%; }
-             50% { background-position: 100% 50%; }
-             100% { background-position: 0% 50%; }
-         }
-
-         .status-success {
-             border-left: 4px solid #00b894 !important;
-             background-color: #d1f2eb !important;
-         }
-
-         .status-error {
-             border-left: 4px solid #e17055 !important;
-             background-color: #fadbd8 !important;
-         }
-
-         @media (max-width: 768px) {
-             .gradio-container {
-                 padding: 10px !important;
-             }
-         }
-         """
-     ) as app:
-
-         gr.HTML("""
-         <div class="header-gradient" style="text-align: center; padding: 30px; border-radius: 15px; margin-bottom: 25px;">
-             <h1 style="color: white; margin: 0; font-size: 3em; font-weight: 700; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">
-                 🚀 Nexus AI Assistant
-             </h1>
-             <p style="color: white; margin: 15px 0 0 0; font-size: 1.3em; text-shadow: 1px 1px 2px rgba(0,0,0,0.2);">
-                 Creative Multimodal AI Powered by AFM-4.5B
-             </p>
-         </div>
-         """)
-
-         with gr.Group(elem_classes=["api-key-section"]):
-             gr.HTML("<h3 style='margin-top: 0; color: #2d3748;'>🔑 API Configuration</h3>")
-             with gr.Row():
-                 api_key_input = gr.Textbox(
-                     label="Together AI API Key",
-                     type="password",
-                     placeholder="Enter your Together AI API key here...",
-                     scale=3,
-                     container=True
-                 )
-                 api_key_btn = gr.Button(
-                     "Initialize API Key",
-                     variant="primary",
-                     scale=1,
-                     elem_classes=["primary-button2"]
-                 )
-
-             api_key_status = gr.Textbox(
-                 label="Status",
-                 interactive=False,
-                 value="Please enter your API key to get started",
-                 elem_classes=["scrollable-textarea"]
-             )
-
-         api_key_state = gr.State(value="not_initialized")
-
-         with gr.Tabs():
-
-             with gr.Tab("💬 Text Chat"):
-                 with gr.Row():
-                     with gr.Column(scale=1, elem_classes=["input-card"]):
-                         text_input = gr.Textbox(
-                             label="Your Message",
-                             placeholder="Ask me anything! I can help with creative tasks, analysis, problem-solving...",
-                             lines=4,
-                             elem_classes=["scrollable-textarea"]
-                         )
-                         with gr.Row():
-                             text_btn = gr.Button(
-                                 "Send Message",
-                                 variant="primary",
-                                 elem_classes=["primary-button"]
-                             )
-                             text_clear_btn = gr.Button(
-                                 "Clear",
-                                 variant="secondary",
-                                 elem_classes=["danger-button"]
-                             )
-
-                     with gr.Column(scale=1, elem_classes=["output-card"]):
-                         text_output = gr.Textbox(
-                             label="Nexus AI Response",
-                             lines=10,
-                             interactive=False,
-                             elem_classes=["scrollable-textarea"]
-                         )
-
-             with gr.Tab("🎤 Voice Processing"):
-                 with gr.Row():
-                     with gr.Column(scale=1, elem_classes=["input-card"]):
-                         audio_file = gr.Audio(
-                             label="Upload Audio (WAV File supported)",
-                             type="filepath"
-                         )
-                         audio_question = gr.Textbox(
-                             label="Additional Question (optional)",
-                             placeholder="Any specific question about the audio content?",
-                             lines=3,
-                             elem_classes=["scrollable-textarea"]
-                         )
-                         with gr.Row():
-                             audio_btn = gr.Button(
-                                 "Process Audio",
-                                 variant="primary",
-                                 elem_classes=["primary-button"]
-                             )
-                             audio_clear_btn = gr.Button(
-                                 "Clear",
-                                 variant="secondary",
-                                 elem_classes=["danger-button"]
-                             )
-
-                     with gr.Column(scale=1, elem_classes=["output-card"]):
-                         audio_output = gr.Textbox(
-                             label="Processing Result",
-                             lines=12,
-                             interactive=False,
-                             elem_classes=["scrollable-textarea"]
-                         )
-
-             with gr.Tab("⚡ Code Executor"):
-                 with gr.Row():
-                     with gr.Column(scale=1, elem_classes=["input-card"]):
-                         code_input = gr.Code(
-                             label="Code Input",
-                             language="python",
-                             lines=10
-                         )
-                         with gr.Row():
-                             language_select = gr.Dropdown(
-                                 choices=["python", "No other Language supported yet!"],
-                                 value="python",
-                                 label="Language",
-                                 scale=1
-                             )
-                             code_action = gr.Radio(
-                                 choices=["Execute Code", "Analyze Only"],
-                                 value="Execute Code",
-                                 label="Action",
-                                 scale=1
-                             )
-                         with gr.Row():
-                             code_btn = gr.Button(
-                                 "Process Code",
-                                 variant="primary",
-                                 elem_classes=["primary-button"]
-                             )
-                             code_clear_btn = gr.Button(
-                                 "Clear",
-                                 variant="secondary",
-                                 elem_classes=["danger-button"]
-                             )
-
-                     with gr.Column(scale=1, elem_classes=["output-card"]):
-                         code_output = gr.Textbox(
-                             label="Result & Analysis",
-                             lines=15,
-                             interactive=False,
-                             elem_classes=["scrollable-textarea"]
-                         )
-
-             with gr.Tab("🧠 Memory & History"):
-                 with gr.Column(elem_classes=["input-card"]):
-                     gr.HTML("<h3 style='margin-top: 0;'>Conversation Memory</h3>")
-                     gr.HTML("<p>Nexus AI remembers your interactions and can connect insights across different input types.</p>")
-
-                     with gr.Row():
-                         history_btn = gr.Button(
-                             "Show Recent History",
-                             variant="secondary",
-                             elem_classes=["secondary-button"],
-                             scale=1
-                         )
-                         clear_btn = gr.Button(
-                             "Clear History",
-                             variant="secondary",
-                             elem_classes=["danger-button"],
-                             scale=1
-                         )
-
-                     history_output = gr.Textbox(
-                         label="Conversation History",
-                         lines=15,
-                         interactive=False,
-                         elem_classes=["scrollable-textarea"]
-                     )
-
-         def update_api_status(api_key):
-             message, status = initialize_api_key(api_key)
-             if status == "success":
-                 return gr.update(value=message, elem_classes=["scrollable-textarea", "status-success"]), status
-             else:
-                 return gr.update(value=message, elem_classes=["scrollable-textarea", "status-error"]), status
-
-         api_key_btn.click(
-             fn=update_api_status,
-             inputs=[api_key_input],
-             outputs=[api_key_status, api_key_state]
-         )
-
-         text_btn.click(
-             fn=process_text_input,
-             inputs=[text_input, api_key_state],
-             outputs=[text_output]
-         )
-
-         text_clear_btn.click(
-             fn=clear_text_inputs,
-             inputs=[],
-             outputs=[text_input, text_output]
-         )
-
-         audio_btn.click(
-             fn=process_audio_input,
-             inputs=[audio_file, audio_question, api_key_state],
-             outputs=[audio_output]
-         )
-
-         audio_clear_btn.click(
-             fn=clear_audio_inputs,
-             inputs=[],
-             outputs=[audio_file, audio_question, audio_output]
-         )
-
-         code_btn.click(
-             fn=process_code_input,
-             inputs=[code_input, language_select, code_action, api_key_state],
-             outputs=[code_output]
-         )
-
-         code_clear_btn.click(
-             fn=clear_code_inputs,
-             inputs=[],
-             outputs=[code_input, code_output, language_select, code_action]
-         )
-
-         history_btn.click(
-             fn=show_conversation_history,
-             outputs=[history_output]
-         )
-
-         clear_btn.click(
-             fn=clear_conversation_history,
-             outputs=[history_output]
-         )
-
-         gr.HTML("""
-         <div style="text-align: center; padding: 25px; margin-top: 30px; border-top: 2px solid #e9ecef; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); border-radius: 10px;">
-             <p style="color: #495057; font-size: 1.1em; margin: 0;">
-                 🚀 <strong>Nexus AI Assistant</strong> - Powered by AFM-4.5B | Built with ❤️ using Gradio
-             </p>
-             <p style="color: #6c757d; font-size: 0.9em; margin: 5px 0 0 0;">
-                 Multi-modal AI assistant for creative and analytical tasks
-             </p>
-         </div>
-         """)
-
-     return app

  if __name__ == "__main__":
-     app = create_nexus_interface()
-     app.launch(
-         share=True
-     )
 
+ from Crypto.Cipher import AES
+ from Crypto.Protocol.KDF import PBKDF2
  import os
  import tempfile
+ from dotenv import load_dotenv

+ load_dotenv() # Load all environment variables

+ def unpad(data):
+     return data[:-data[-1]]

+ def decrypt_and_run():
+     # Get password from Hugging Face Secrets environment variable
+     password = os.getenv("PASSWORD")
+     if not password:
+         raise ValueError("PASSWORD secret not found in environment variables")

+     password = password.encode()

+     with open("code.enc", "rb") as f:
+         encrypted = f.read()

+     salt = encrypted[:16]
+     iv = encrypted[16:32]
+     ciphertext = encrypted[32:]

+     key = PBKDF2(password, salt, dkLen=32, count=1000000)
+     cipher = AES.new(key, AES.MODE_CBC, iv)

+     plaintext = unpad(cipher.decrypt(ciphertext))

+     with tempfile.NamedTemporaryFile(suffix=".py", delete=False, mode='wb') as tmp:
+         tmp.write(plaintext)
+         tmp.flush()
+         print(f"[INFO] Running decrypted code from {tmp.name}")
+         os.system(f"python {tmp.name}")

  if __name__ == "__main__":
+     decrypt_and_run()
+
+ # This script decrypts the encrypted code and runs it.
+ # Ensure you have the PASSWORD secret set in your Hugging Face Secrets
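
Note on the expected layout of code.enc: the new loader reads a 16-byte salt, a 16-byte IV, and the AES-CBC ciphertext in that order, derives a 32-byte key with PBKDF2 (count=1000000) from the PASSWORD secret, strips PKCS#7-style padding, and runs the result. The snippet below is a minimal sketch of a matching encryption step, assuming PyCryptodome; the encrypt_file helper and the real_app.py/code.enc filenames are illustrative and not part of this commit.

# Hypothetical companion script (not in this repo): produces a code.enc that
# decrypt_and_run() above could read, using the same salt/IV layout and KDF settings.
import os
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Random import get_random_bytes


def pad(data: bytes, block_size: int = 16) -> bytes:
    # PKCS#7 padding; the unpad() in app.py strips exactly this.
    pad_len = block_size - (len(data) % block_size)
    return data + bytes([pad_len]) * pad_len


def encrypt_file(src: str = "real_app.py", dst: str = "code.enc") -> None:
    password = os.environ["PASSWORD"].encode()  # same secret the Space reads
    salt = get_random_bytes(16)
    iv = get_random_bytes(16)
    key = PBKDF2(password, salt, dkLen=32, count=1000000)  # matches the decrypt side
    cipher = AES.new(key, AES.MODE_CBC, iv)
    with open(src, "rb") as f:
        plaintext = f.read()
    with open(dst, "wb") as f:
        f.write(salt + iv + cipher.encrypt(pad(plaintext)))  # salt | iv | ciphertext


if __name__ == "__main__":
    encrypt_file()

Running this once locally with the same PASSWORD value the Space secret holds should produce a code.enc that the loader above can decrypt.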