CosmickVisions committed on
Commit
2247005
·
verified ·
1 Parent(s): 9c82681

Update app.py

Files changed (1)
  1. app.py +937 -153
app.py CHANGED
@@ -1,21 +1,35 @@
1
- import gradio as gr
2
- import groq
3
  import os
4
  import tempfile
5
  import uuid
6
- from dotenv import load_dotenv
7
- from langchain.text_splitter import RecursiveCharacterTextSplitter
8
- from langchain.vectorstores import FAISS
9
- from langchain.embeddings import HuggingFaceEmbeddings
10
- import fitz # PyMuPDF
11
  import base64
12
- from PIL import Image
13
  import io
14
- import requests
15
  import json
16
  import re
17
  from datetime import datetime, timedelta
18
19
  # Load environment variables
20
  load_dotenv()
21
  client = groq.Client(api_key=os.getenv("GROQ_TECH_API_KEY"))
@@ -29,47 +43,371 @@ if not os.path.exists(FAISS_INDEX_DIR):
29
  # Dictionary to store user-specific vectorstores
30
  user_vectorstores = {}
31
 
32
- # Custom CSS for Tech theme
33
- custom_css = """
34
- :root {
35
- --primary-color: #4285F4; /* Google Blue */
36
- --secondary-color: #34A853; /* Google Green */
37
- --light-background: #F8F9FA;
38
- --dark-text: #202124;
39
- --white: #FFFFFF;
40
- --border-color: #DADCE0;
41
- --code-bg: #F1F3F4;
42
- --code-text: #37474F;
43
- --error-color: #EA4335; /* Google Red */
44
- --warning-color: #FBBC04; /* Google Yellow */
45
  }
46
- body { background-color: var(--light-background); font-family: 'Google Sans', 'Roboto', sans-serif; }
47
- .container { max-width: 1200px !important; margin: 0 auto !important; padding: 10px; }
48
- .header { background-color: var(--white); border-bottom: 1px solid var(--border-color); padding: 15px 0; margin-bottom: 20px; border-radius: 12px 12px 0 0; box-shadow: 0 1px 2px rgba(0,0,0,0.05); }
49
- .header-title { color: var(--primary-color); font-size: 1.8rem; font-weight: 700; text-align: center; }
50
- .header-subtitle { color: var(--dark-text); font-size: 1rem; text-align: center; margin-top: 5px; }
51
- .chat-container { border-radius: 8px !important; box-shadow: 0 1px 3px rgba(0,0,0,0.1) !important; background-color: var(--white) !important; border: 1px solid var(--border-color) !important; min-height: 500px; }
52
- .message-user { background-color: var(--primary-color) !important; color: var(--white) !important; border-radius: 18px 18px 4px 18px !important; padding: 12px 16px !important; margin-left: auto !important; max-width: 80% !important; }
53
- .message-bot { background-color: #F1F3F4 !important; color: var(--dark-text) !important; border-radius: 18px 18px 18px 4px !important; padding: 12px 16px !important; margin-right: auto !important; max-width: 80% !important; }
54
- .input-area { background-color: var(--white) !important; border-top: 1px solid var(--border-color) !important; padding: 12px !important; border-radius: 0 0 12px 12px !important; }
55
- .input-box { border: 1px solid var(--border-color) !important; border-radius: 24px !important; padding: 12px 16px !important; box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important; }
56
- .send-btn { background-color: var(--primary-color) !important; border-radius: 24px !important; color: var(--white) !important; padding: 10px 20px !important; font-weight: 500 !important; }
57
- .clear-btn { background-color: #F1F3F4 !important; border: 1px solid var(--border-color) !important; border-radius: 24px !important; color: var(--dark-text) !important; padding: 8px 16px !important; font-weight: 500 !important; }
58
- .pdf-viewer-container { border-radius: 8px !important; box-shadow: 0 1px 3px rgba(0,0,0,0.1) !important; background-color: var(--white) !important; border: 1px solid var(--border-color) !important; padding: 20px; }
59
- .pdf-viewer-image { max-width: 100%; height: auto; border: 1px solid var(--border-color); border-radius: 8px; box-shadow: 0 1px 2px rgba(0,0,0,0.05); }
60
- .stats-box { background-color: #E8F0FE; padding: 10px; border-radius: 8px; margin-top: 10px; }
61
- .tool-container { background-color: var(--white); border-radius: 8px; box-shadow: 0 1px 3px rgba(0,0,0,0.1); padding: 15px; margin-bottom: 20px; border: 1px solid var(--border-color); }
62
- .code-block { background-color: var(--code-bg); color: var(--code-text); padding: 12px; border-radius: 8px; font-family: 'Roboto Mono', monospace; overflow-x: auto; margin: 10px 0; border-left: 3px solid var(--primary-color); }
63
- .repo-card { border: 1px solid var(--border-color); padding: 15px; margin: 10px 0; border-radius: 8px; background-color: var(--white); }
64
- .repo-name { color: var(--primary-color); font-weight: bold; font-size: 1.1rem; margin-bottom: 5px; }
65
- .repo-description { color: var(--dark-text); font-size: 0.9rem; margin-bottom: 10px; }
66
- .repo-stats { display: flex; gap: 15px; color: #5F6368; font-size: 0.85rem; }
67
- .repo-stat { display: flex; align-items: center; gap: 5px; }
68
- .qa-card { border-left: 3px solid var(--secondary-color); padding: 10px 15px; margin: 15px 0; background-color: #F8F9FA; border-radius: 0 8px 8px 0; }
69
- .qa-title { font-weight: bold; color: var(--dark-text); margin-bottom: 5px; }
70
- .qa-body { color: var(--dark-text); font-size: 0.95rem; margin-bottom: 10px; }
71
- .qa-meta { display: flex; justify-content: space-between; color: #5F6368; font-size: 0.85rem; }
72
- .tag { background-color: #E8F0FE; color: var(--primary-color); padding: 4px 8px; border-radius: 4px; font-size: 0.8rem; margin-right: 5px; display: inline-block; }
73
  """
74
 
75
  # Function to process PDF files
@@ -109,8 +447,147 @@ def process_pdf(pdf_file):
109
  os.unlink(pdf_path)
110
  return None, f"Error processing PDF: {str(e)}", {"page_images": [], "total_pages": 0, "total_words": 0}
111
 
112
  # Function to generate chatbot responses with Tech theme
113
- def generate_response(message, session_id, model_name, history):
114
  if not message:
115
  return history
116
  try:
@@ -121,8 +598,8 @@ def generate_response(message, session_id, model_name, history):
121
  if docs:
122
  context = "\n\nRelevant information from uploaded PDF:\n" + "\n".join(f"- {doc.page_content}" for doc in docs)
123
 
124
- # Check if it's a GitHub repo search
125
- if re.match(r'^/github\s+.+', message, re.IGNORECASE):
126
  query = re.sub(r'^/github\s+', '', message, flags=re.IGNORECASE)
127
  repo_results = search_github_repos(query)
128
  if repo_results:
@@ -139,8 +616,8 @@ def generate_response(message, session_id, model_name, history):
139
  history.append((message, "No GitHub repositories found for your query."))
140
  return history
141
 
142
- # Check if it's a Stack Overflow search
143
- if re.match(r'^/stack\s+.+', message, re.IGNORECASE):
144
  query = re.sub(r'^/stack\s+', '', message, flags=re.IGNORECASE)
145
  qa_results = search_stackoverflow(query)
146
  if qa_results:
@@ -433,110 +910,402 @@ def perform_stack_search(query, tag, sort_by):
433
  except Exception as e:
434
  return f"Error searching Stack Overflow: {str(e)}"
435
 
436
- # Gradio interface
437
- with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
438
  current_session_id = gr.State(None)
439
  pdf_state = gr.State({"page_images": [], "total_pages": 0, "total_words": 0})
440
  gr.HTML("""
441
- <div class="header">
442
- <div class="header-title">Tech-Vision</div>
443
- <div class="header-subtitle">Analyze technical documents with Groq's LLM API.</div>
444
  </div>
445
  """)
446
- with gr.Row(elem_classes="container"):
447
  with gr.Column(scale=1, min_width=300):
448
- pdf_file = gr.File(label="Upload PDF Document", file_types=[".pdf"], type="binary")
449
- upload_button = gr.Button("Process PDF", variant="primary")
450
- pdf_status = gr.Markdown("No PDF uploaded yet")
451
  model_dropdown = gr.Dropdown(
452
  choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
453
  value="llama3-70b-8192",
454
  label="Select Groq Model"
455
  )
456
-
457
- # Tech Tools Section
458
- gr.Markdown("### Developer Tools", elem_classes="tool-title")
459
- with gr.Group(elem_classes="tool-container"):
460
- with gr.Tabs():
461
- with gr.TabItem("GitHub Search"):
462
- repo_query = gr.Textbox(label="Search Query", placeholder="Enter keywords to search for repositories")
463
- with gr.Row():
464
- language = gr.Dropdown(
465
- choices=["any", "JavaScript", "Python", "Java", "C++", "TypeScript", "Go", "Rust", "PHP", "C#"],
466
- value="any",
467
- label="Language"
468
- )
469
- min_stars = gr.Dropdown(
470
- choices=["0", "10", "50", "100", "1000", "10000"],
471
- value="0",
472
- label="Min Stars"
473
- )
474
- sort_by = gr.Dropdown(
475
- choices=["stars", "forks", "updated"],
476
- value="stars",
477
- label="Sort By"
478
- )
479
- repo_search_btn = gr.Button("Search Repositories")
480
-
481
- with gr.TabItem("Stack Overflow"):
482
- stack_query = gr.Textbox(label="Search Query", placeholder="Enter your technical question")
483
- with gr.Row():
484
- tag = gr.Dropdown(
485
- choices=["any", "python", "javascript", "java", "c++", "react", "node.js", "android", "ios", "sql"],
486
- value="any",
487
- label="Tag"
488
- )
489
- so_sort_by = gr.Dropdown(
490
- choices=["votes", "newest", "activity"],
491
- value="votes",
492
- label="Sort By"
493
- )
494
- so_search_btn = gr.Button("Search Stack Overflow")
495
-
496
- with gr.TabItem("Code Explainer"):
497
- code_input = gr.Textbox(
498
- label="Code to Explain",
499
- placeholder="Paste your code here...",
500
- lines=10
501
- )
502
- explain_btn = gr.Button("Explain Code")
503
-
504
  with gr.Column(scale=2, min_width=600):
505
  with gr.Tabs():
506
  with gr.TabItem("PDF Viewer"):
507
  with gr.Column(elem_classes="pdf-viewer-container"):
508
  page_slider = gr.Slider(minimum=1, maximum=1, step=1, label="Page Number", value=1)
509
  pdf_image = gr.Image(label="PDF Page", type="pil", elem_classes="pdf-viewer-image")
510
- stats_display = gr.Markdown("No PDF uploaded yet", elem_classes="stats-box")
511
 
512
- with gr.TabItem("GitHub Results"):
513
- repo_results = gr.Markdown("Search for repositories to see results here")
 
514
 
515
- with gr.TabItem("Stack Overflow Results"):
516
- stack_results = gr.Markdown("Search for questions to see results here")
517
-
518
- with gr.TabItem("Code Explanation"):
519
- code_explanation = gr.Markdown("Paste your code and click 'Explain Code' to see an explanation here")
520
 
 
521
  with gr.Row(elem_classes="container"):
522
  with gr.Column(scale=2, min_width=600):
523
- chatbot = gr.Chatbot(height=500, bubble_full_width=False, show_copy_button=True, elem_classes="chat-container")
524
  with gr.Row():
525
- msg = gr.Textbox(show_label=False, placeholder="Ask about your document, type /github to search repos, or /stack to search Stack Overflow...", scale=5)
526
  send_btn = gr.Button("Send", scale=1)
527
- clear_btn = gr.Button("Clear Conversation")
528
 
529
- # Event Handlers
530
- upload_button.click(
531
  process_pdf,
532
  inputs=[pdf_file],
533
- outputs=[current_session_id, pdf_status, pdf_state]
534
  ).then(
535
  update_pdf_viewer,
536
  inputs=[pdf_state],
537
- outputs=[page_slider, pdf_image, stats_display]
538
  )
539
 
540
  msg.submit(
541
  generate_response,
542
  inputs=[msg, current_session_id, model_dropdown, chatbot],
@@ -549,44 +1318,59 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
549
  outputs=[chatbot]
550
  ).then(lambda: "", None, [msg])
551
 
552
- clear_btn.click(
553
- lambda: ([], None, "No PDF uploaded yet", {"page_images": [], "total_pages": 0, "total_words": 0}, 0, None, "No PDF uploaded yet"),
554
- None,
555
- [chatbot, current_session_id, pdf_status, pdf_state, page_slider, pdf_image, stats_display]
 
556
  )
557
 
558
  page_slider.change(
559
  update_image,
560
  inputs=[page_slider, pdf_state],
561
  outputs=[pdf_image]
562
  )
563
 
564
- # Tech tool handlers
565
- repo_search_btn.click(
566
- perform_repo_search,
567
- inputs=[repo_query, language, sort_by, min_stars],
568
- outputs=[repo_results]
569
- )
570
-
571
- so_search_btn.click(
572
- perform_stack_search,
573
- inputs=[stack_query, tag, so_sort_by],
574
- outputs=[stack_results]
575
- )
576
-
577
- explain_btn.click(
578
- explain_code,
579
- inputs=[code_input],
580
- outputs=[code_explanation]
581
  )
582
 
583
- # Add footer with attribution
584
- gr.HTML("""
585
- <div style="text-align: center; margin-top: 20px; padding: 10px; color: #666; font-size: 0.8rem; border-top: 1px solid #eee;">
586
- Created by Calvin Allen Crawford
587
- </div>
588
- """)
589
 
590
  # Launch the app
591
  if __name__ == "__main__":
 
592
  demo.launch()
 
1
+ # Standard library imports
 
2
  import os
3
  import tempfile
4
  import uuid
5
  import base64
 
6
  import io
 
7
  import json
8
  import re
9
  from datetime import datetime, timedelta
10
 
11
+ # Third-party imports
12
+ import gradio as gr
13
+ import groq
14
+ import numpy as np
15
+ import pandas as pd
16
+ import openpyxl
17
+ import requests
18
+ import fitz # PyMuPDF
19
+ from PIL import Image
20
+ from dotenv import load_dotenv
21
+ from transformers import AutoProcessor, AutoModelForVision2Seq
22
+ import torch
23
+ import sass
24
+ from pathlib import Path
25
+ import pyttsx3
26
+ import speech_recognition as sr
27
+
28
+ # LangChain imports
29
+ from langchain_community.embeddings import HuggingFaceEmbeddings
30
+ from langchain_community.vectorstores import FAISS
31
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
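+ # Note: the imports above assume the corresponding packages are installed (gradio, groq,
+ # numpy, pandas, openpyxl, requests, pymupdf, pillow, python-dotenv, transformers, torch,
+ # libsass, pyttsx3, SpeechRecognition, langchain, langchain-community, plus faiss-cpu and
+ # sentence-transformers for the FAISS / HuggingFaceEmbeddings backends); versions are not pinned here.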
32
+
33
  # Load environment variables
34
  load_dotenv()
35
  client = groq.Client(api_key=os.getenv("GROQ_TECH_API_KEY"))
 
43
  # Dictionary to store user-specific vectorstores
44
  user_vectorstores = {}
45
 
46
+ # Advanced SCSS with cyberpunk styling
47
+ CYBERPUNK_SCSS = """
48
+ // Advanced Cyberpunk Theme with Neural Network Aesthetics
49
+ @use "sass:math";
50
+ @use "sass:color";
51
+
52
+ // Neural Color System
53
+ $neural-colors: (
54
+ 'synapse-blue': #00F3FF,
55
+ 'neural-red': #FF0033,
56
+ 'data-yellow': #FFE600,
57
+ 'matrix-green': #00FF9F,
58
+ 'void-black': #0D0D0D,
59
+ 'deep-void': #080808,
60
+ 'neural-white': #E6E6E6,
61
+ 'grid-alpha': 0.1
62
+ );
63
+
64
+ // Dynamic Color Functions
65
+ @function neural-glow($color, $intensity: 1) {
66
+ $glow-color: map-get($neural-colors, $color);
67
+ @return (
68
+ 0 0 #{10px * $intensity} $glow-color,
69
+ 0 0 #{20px * $intensity} $glow-color
70
+ );
71
+ }
72
+
73
+ @function generate-glitch-animation($name, $color1, $color2) {
74
+ @keyframes #{$name} {
75
+ 0%, 100% {
76
+ text-shadow: -2px 0 map-get($neural-colors, $color1),
77
+ 2px 2px map-get($neural-colors, $color2);
78
+ }
79
+ 25% {
80
+ text-shadow: 2px -2px map-get($neural-colors, $color1),
81
+ -2px -2px map-get($neural-colors, $color2);
82
+ }
83
+ 50% {
84
+ text-shadow: 1px 3px map-get($neural-colors, $color1),
85
+ -3px -1px map-get($neural-colors, $color2);
86
+ }
87
+ 75% {
88
+ text-shadow: -3px 1px map-get($neural-colors, $color1),
89
+ 1px -1px map-get($neural-colors, $color2);
90
+ }
91
+ }
92
+ }
93
+
94
+ // Generate Multiple Glitch Animations
95
+ #{generate-glitch-animation('neural-glitch', 'synapse-blue', 'neural-red')}
96
+ #{generate-glitch-animation('data-glitch', 'data-yellow', 'matrix-green')}
97
+
98
+ // Advanced Mixins
99
+ @mixin neural-container($depth: 1) {
100
+ background: linear-gradient(
101
+ 170deg,
102
+ rgba(map-get($neural-colors, 'deep-void'), 0.9),
103
+ rgba(map-get($neural-colors, 'void-black'), 0.95)
104
+ );
105
+ border: #{$depth}px solid map-get($neural-colors, 'synapse-blue');
106
+ box-shadow: neural-glow('synapse-blue', $depth);
107
+ backdrop-filter: blur(5px);
108
+ position: relative;
109
+ overflow: hidden;
110
+
111
+ &::before {
112
+ content: '';
113
+ position: absolute;
114
+ top: 0;
115
+ left: 0;
116
+ right: 0;
117
+ height: 1px;
118
+ background: linear-gradient(
119
+ 90deg,
120
+ transparent,
121
+ map-get($neural-colors, 'synapse-blue'),
122
+ transparent
123
+ );
124
+ animation: neural-scan 2s linear infinite;
125
+ }
126
+ }
127
+
128
+ @mixin cyber-text($size, $color: 'synapse-blue') {
129
+ font-family: 'Orbitron', 'Rajdhani', sans-serif;
130
+ font-size: $size;
131
+ color: map-get($neural-colors, $color);
132
+ text-transform: uppercase;
133
+ letter-spacing: 2px;
134
+ position: relative;
135
+ text-shadow: 0 0 5px map-get($neural-colors, $color);
136
+ }
137
+
138
+ // Advanced Animations
139
+ @keyframes neural-scan {
140
+ 0% { transform: translateX(-100%); opacity: 0; }
141
+ 50% { opacity: 1; }
142
+ 100% { transform: translateX(100%); opacity: 0; }
143
+ }
144
+
145
+ @keyframes data-pulse {
146
+ 0%, 100% { opacity: 0.8; transform: scale(1); }
147
+ 50% { opacity: 1; transform: scale(1.02); }
148
+ }
149
+
150
+ // Base Styles
151
+ body {
152
+ background-color: map-get($neural-colors, 'void-black');
153
+ background-image:
154
+ linear-gradient(
155
+ rgba(map-get($neural-colors, 'synapse-blue'),
156
+ map-get($neural-colors, 'grid-alpha')) 1px,
157
+ transparent 1px
158
+ ),
159
+ linear-gradient(
160
+ 90deg,
161
+ rgba(map-get($neural-colors, 'synapse-blue'),
162
+ map-get($neural-colors, 'grid-alpha')) 1px,
163
+ transparent 1px
164
+ );
165
+ background-size: 20px 20px;
166
+ color: map-get($neural-colors, 'neural-white');
167
+ }
168
+
169
+ // Advanced Components
170
+ .neural-interface {
171
+ @include neural-container(2);
172
+ padding: 20px;
173
+ margin: 20px;
174
+ clip-path: polygon(
175
+ 0 20px,
176
+ 20px 0,
177
+ calc(100% - 20px) 0,
178
+ 100% 20px,
179
+ 100% calc(100% - 20px),
180
+ calc(100% - 20px) 100%,
181
+ 20px 100%,
182
+ 0 calc(100% - 20px)
183
+ );
184
+
185
+ &__header {
186
+ @include cyber-text(2rem);
187
+ text-align: center;
188
+ margin-bottom: 20px;
189
+ animation: neural-glitch 5s infinite;
190
+ }
191
+
192
+ &__content {
193
+ position: relative;
194
+ z-index: 1;
195
+ }
196
+ }
197
+
198
+ .data-display {
199
+ @include neural-container(1);
200
+ padding: 15px;
201
+ margin: 10px 0;
202
+ animation: data-pulse 4s infinite;
203
+
204
+ &__label {
205
+ @include cyber-text(0.9rem, 'data-yellow');
206
+ margin-bottom: 5px;
207
+ }
208
+
209
+ &__value {
210
+ @include cyber-text(1.2rem, 'matrix-green');
211
+ }
212
+ }
213
+
214
+ // Interactive Elements
215
+ .neural-button {
216
+ @include neural-container(1);
217
+ padding: 10px 20px;
218
+ cursor: pointer;
219
+ transition: all 0.3s ease;
220
+
221
+ &:hover {
222
+ transform: translateY(-2px) scale(1.02);
223
+ box-shadow: neural-glow('synapse-blue', 2);
224
+ }
225
+
226
+ &:active {
227
+ transform: translateY(1px);
228
+ }
229
+ }
230
+
231
+ // Code Display
232
+ .code-matrix {
233
+ @include neural-container(1);
234
+ font-family: 'Source Code Pro', monospace;
235
+ padding: 20px;
236
+ margin: 15px 0;
237
+
238
+ &__line {
239
+ position: relative;
240
+ padding-left: 20px;
241
+
242
+ &::before {
243
+ content: '>';
244
+ position: absolute;
245
+ left: 0;
246
+ color: map-get($neural-colors, 'matrix-green');
247
+ }
248
+ }
249
+ }
250
+
251
+ // Status Indicators
252
+ .neural-status {
253
+ display: flex;
254
+ align-items: center;
255
+ gap: 10px;
256
+
257
+ &__indicator {
258
+ width: 10px;
259
+ height: 10px;
260
+ border-radius: 50%;
261
+ background: map-get($neural-colors, 'matrix-green');
262
+ animation: data-pulse 2s infinite;
263
+ }
264
+
265
+ &__text {
266
+ @include cyber-text(0.9rem, 'matrix-green');
267
+ }
268
+ }
269
+
270
+ // Advanced Grid Layout
271
+ .neural-grid {
272
+ display: grid;
273
+ grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
274
+ gap: 20px;
275
+ padding: 20px;
276
+
277
+ &__item {
278
+ @include neural-container(1);
279
+ padding: 15px;
280
+ transition: transform 0.3s ease;
281
+
282
+ &:hover {
283
+ transform: translateZ(20px);
284
+ z-index: 2;
285
+ }
286
+ }
287
  }
288
+ """
289
+
290
+ # Compile SCSS to CSS
291
+ def compile_scss():
292
+ try:
293
+ return sass.compile(string=CYBERPUNK_SCSS)
294
+ except sass.CompileError as e:
295
+ print(f"SCSS Compilation Error: {e}")
296
+ return ""
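+ # Note: the `sass` module used here is libsass, which predates the Sass module system; the
+ # `@use` rules in CYBERPUNK_SCSS may therefore fail to compile, in which case the empty
+ # string returned above leaves the interface with Gradio's default styling.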
297
+
298
+ # Advanced JavaScript for dynamic effects
299
+ NEURAL_JS = """
300
+ <script>
301
+ class NeuralInterface {
302
+ constructor() {
303
+ this.initializeEffects();
304
+ this.setupEventListeners();
305
+ }
306
+
307
+ initializeEffects() {
308
+ this.setupGlitchEffects();
309
+ this.setupDataStreams();
310
+ this.setupHolographicEffects();
311
+ }
312
+
313
+ setupGlitchEffects() {
314
+ document.querySelectorAll('.neural-interface__header').forEach(element => {
315
+ setInterval(() => {
316
+ if (Math.random() < 0.1) {
317
+ element.style.transform = `translate(${Math.random() * 4 - 2}px, ${Math.random() * 4 - 2}px)`;
318
+ setTimeout(() => element.style.transform = 'none', 100);
319
+ }
320
+ }, 2000);
321
+ });
322
+ }
323
+
324
+ setupDataStreams() {
325
+ const canvas = document.createElement('canvas');
326
+ document.body.appendChild(canvas);
327
+ canvas.style.position = 'fixed';
328
+ canvas.style.top = '0';
329
+ canvas.style.left = '0';
330
+ canvas.style.width = '100%';
331
+ canvas.style.height = '100%';
332
+ canvas.style.pointerEvents = 'none';
333
+ canvas.style.zIndex = '1';
334
+ canvas.style.opacity = '0.1';
335
+
336
+ const ctx = canvas.getContext('2d');
337
+ const matrix = "ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789@#$%^&*()*&^%";
338
+ const drops = [];
339
+
340
+ function initMatrix() {
341
+ canvas.width = window.innerWidth;
342
+ canvas.height = window.innerHeight;
343
+ const columns = canvas.width / 20;
344
+ for(let i = 0; i < columns; i++) drops[i] = 1;
345
+ }
346
+
347
+ function drawMatrix() {
348
+ ctx.fillStyle = 'rgba(0, 0, 0, 0.05)';
349
+ ctx.fillRect(0, 0, canvas.width, canvas.height);
350
+ ctx.fillStyle = '#0F0';
351
+ ctx.font = '15px monospace';
352
+ for(let i = 0; i < drops.length; i++) {
353
+ const text = matrix[Math.floor(Math.random() * matrix.length)];
354
+ ctx.fillText(text, i * 20, drops[i] * 20);
355
+ if(drops[i] * 20 > canvas.height && Math.random() > 0.975)
356
+ drops[i] = 0;
357
+ drops[i]++;
358
+ }
359
+ }
360
+
361
+ window.addEventListener('resize', initMatrix);
362
+ initMatrix();
363
+ setInterval(drawMatrix, 50);
364
+ }
365
+
366
+ setupHolographicEffects() {
367
+ document.querySelectorAll('.neural-button').forEach(button => {
368
+ button.addEventListener('mousemove', e => {
369
+ const rect = button.getBoundingClientRect();
370
+ const x = e.clientX - rect.left;
371
+ const y = e.clientY - rect.top;
372
+
373
+ button.style.setProperty('--x', `${x}px`);
374
+ button.style.setProperty('--y', `${y}px`);
375
+ });
376
+ });
377
+ }
378
+
379
+ setupEventListeners() {
380
+ document.addEventListener('click', e => {
381
+ if (e.target.closest('.neural-button')) {
382
+ this.createRippleEffect(e);
383
+ }
384
+ });
385
+ }
386
+
387
+ createRippleEffect(e) {
388
+ const button = e.target.closest('.neural-button');
389
+ const ripple = document.createElement('span');
390
+ ripple.classList.add('ripple');
391
+ button.appendChild(ripple);
392
+
393
+ const rect = button.getBoundingClientRect();
394
+ const size = Math.max(rect.width, rect.height);
395
+ ripple.style.width = ripple.style.height = `${size}px`;
396
+
397
+ const x = e.clientX - rect.left - size/2;
398
+ const y = e.clientY - rect.top - size/2;
399
+ ripple.style.left = `${x}px`;
400
+ ripple.style.top = `${y}px`;
401
+
402
+ setTimeout(() => ripple.remove(), 600);
403
+ }
404
+ }
405
+
406
+ // Initialize Neural Interface
407
+ document.addEventListener('DOMContentLoaded', () => {
408
+ new NeuralInterface();
409
+ });
410
+ </script>
411
  """
412
 
413
  # Function to process PDF files
 
447
  os.unlink(pdf_path)
448
  return None, f"Error processing PDF: {str(e)}", {"page_images": [], "total_pages": 0, "total_words": 0}
449
 
450
+ # New function to process Excel files
451
+ def process_excel(excel_file):
452
+ if excel_file is None:
453
+ return None, "No file uploaded", {"data_preview": "", "total_sheets": 0, "total_rows": 0}
454
+
455
+ try:
456
+ session_id = str(uuid.uuid4())
457
+ with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as temp_file:
458
+ temp_file.write(excel_file)
459
+ excel_path = temp_file.name
460
+
461
+ # Read Excel file with pandas
462
+ excel_data = pd.ExcelFile(excel_path)
463
+ sheet_names = excel_data.sheet_names
464
+ all_texts = []
465
+ total_rows = 0
466
+
467
+ # Process each sheet
468
+ for sheet in sheet_names:
469
+ df = pd.read_excel(excel_path, sheet_name=sheet)
470
+ total_rows += len(df)
471
+
472
+ # Convert dataframe to text for vectorization
473
+ sheet_text = f"Sheet: {sheet}\n"
474
+ sheet_text += df.to_string(index=False)
475
+ all_texts.append(sheet_text)
476
+
477
+ # Generate HTML preview of first sheet
478
+ first_df = pd.read_excel(excel_path, sheet_name=0)
479
+ preview_rows = min(10, len(first_df))
480
+ data_preview = first_df.head(preview_rows).to_html(classes="excel-preview-table", index=False)
481
+
482
+ # Process for vectorstore
483
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
484
+ chunks = text_splitter.create_documents(all_texts)
485
+ vectorstore = FAISS.from_documents(chunks, embeddings)
486
+ index_path = os.path.join(FAISS_INDEX_DIR, session_id)
487
+ vectorstore.save_local(index_path)
488
+ user_vectorstores[session_id] = vectorstore
489
+
490
+ os.unlink(excel_path)
491
+ excel_state = {"data_preview": data_preview, "total_sheets": len(sheet_names), "total_rows": total_rows}
492
+ return session_id, f"✅ Successfully processed {len(chunks)} text chunks from Excel file", excel_state
493
+ except Exception as e:
494
+ if "excel_path" in locals() and os.path.exists(excel_path):
495
+ os.unlink(excel_path)
496
+ return None, f"Error processing Excel file: {str(e)}", {"data_preview": "", "total_sheets": 0, "total_rows": 0}
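+ # Note: .xlsx files are read through the openpyxl engine imported above; legacy .xls
+ # uploads (also accepted by the UI) would additionally require the xlrd package.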
497
+
498
+ # Function to perform basic image analysis (image properties only; no external vision model)
499
+ def analyze_image(image_file):
500
+ """
501
+ Basic image analysis function that doesn't rely on external models
502
+ """
503
+ if image_file is None:
504
+ return "No image uploaded. Please upload an image to analyze."
505
+
506
+ try:
507
+ image = Image.open(image_file)
508
+ width, height = image.size
509
+ format = image.format
510
+ mode = image.mode
511
+
512
+ analysis = f"""## Technical Document Analysis
513
+
514
+ **Image Properties:**
515
+ - Dimensions: {width}x{height} pixels
516
+ - Format: {format}
517
+ - Color Mode: {mode}
518
+
519
+ **Technical Analysis:**
520
+ 1. Document Quality:
521
+ - Resolution: {'High' if width > 2000 or height > 2000 else 'Medium' if width > 1000 or height > 1000 else 'Low'}
522
+ - Color Depth: {mode}
523
+
524
+ 2. Recommendations:
525
+ - For text extraction, consider using PDF format
526
+ - For technical diagrams, ensure high resolution
527
+ - Consider OCR for text content
528
+
529
+ **Note:** For detailed technical analysis, please convert to PDF format
530
+ """
531
+ return analysis
532
+ except Exception as e:
533
+ return f"Error analyzing image: {str(e)}\n\nPlease try using PDF format instead."
534
+
535
+ # Function to handle different file types
536
+ def process_file(file_data, file_type):
537
+ if file_data is None:
538
+ return None, "No file uploaded", None
539
+
540
+ if file_type == "pdf":
541
+ return process_pdf(file_data)
542
+ elif file_type == "excel":
543
+ return process_excel(file_data)
544
+ elif file_type == "image":
545
+ # For image files, we'll just use them directly for analysis
546
+ # But we'll return a session ID to maintain consistency
547
+ session_id = str(uuid.uuid4())
548
+ return session_id, "✅ Image file ready for analysis", None
549
+ else:
550
+ return None, "Unsupported file type", None
551
+
552
+ # Function for speech-to-text conversion
553
+ def speech_to_text():
554
+ try:
555
+ r = sr.Recognizer()
556
+ with sr.Microphone() as source:
557
+ r.adjust_for_ambient_noise(source)
558
+ audio = r.listen(source)
559
+ text = r.recognize_google(audio)
560
+ return text
561
+ except sr.UnknownValueError:
562
+ return "Could not understand audio. Please try again."
563
+ except sr.RequestError as e:
564
+ return f"Error with speech recognition service: {e}"
565
+ except Exception as e:
566
+ return f"Error converting speech to text: {str(e)}"
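+ # Note: sr.Microphone() needs PyAudio and a physical microphone on the host running the
+ # app; on a headless server the call is expected to fail and hit the generic handler above.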
567
+
568
+ # Function for text-to-speech conversion
569
+ def text_to_speech(text, history):
570
+ if not text or not history:
571
+ return None
572
+
573
+ try:
574
+ # Get the last bot response
575
+ last_response = history[-1][1]
576
+
577
+ # Convert text to speech
578
+ tts = pyttsx3.init()
579
+ tts.setProperty('rate', 150)
580
+ tts.setProperty('volume', 0.9)
581
+ tts.save_to_file(last_response, "temp_output.mp3")
582
+ tts.runAndWait()
583
+
584
+ return "temp_output.mp3"
585
+ except Exception as e:
586
+ print(f"Error in text-to-speech: {e}")
587
+ return None
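+ # Note: pyttsx3 relies on a system speech engine (SAPI5 on Windows, NSSpeechSynthesizer on
+ # macOS, eSpeak on Linux); if none is present, init() raises and None is returned above.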
588
+
589
  # Function to generate chatbot responses with Tech theme
590
+ def generate_response(message, session_id, model_name, history, web_search_enabled=True):
591
  if not message:
592
  return history
593
  try:
 
598
  if docs:
599
  context = "\n\nRelevant information from uploaded PDF:\n" + "\n".join(f"- {doc.page_content}" for doc in docs)
600
 
601
+ # Check if it's a GitHub repo search and web search is enabled
602
+ if web_search_enabled and re.match(r'^/github\s+.+', message, re.IGNORECASE):
603
  query = re.sub(r'^/github\s+', '', message, flags=re.IGNORECASE)
604
  repo_results = search_github_repos(query)
605
  if repo_results:
 
616
  history.append((message, "No GitHub repositories found for your query."))
617
  return history
618
 
619
+ # Check if it's a Stack Overflow search and web search is enabled
620
+ if web_search_enabled and re.match(r'^/stack\s+.+', message, re.IGNORECASE):
621
  query = re.sub(r'^/stack\s+', '', message, flags=re.IGNORECASE)
622
  qa_results = search_stackoverflow(query)
623
  if qa_results:
 
910
  except Exception as e:
911
  return f"Error searching Stack Overflow: {str(e)}"
912
 
913
+ def detect_language(file_extension):
914
+ """Map file extensions to programming languages"""
915
+ language_map = {
916
+ ".py": "Python",
917
+ ".js": "JavaScript",
918
+ ".java": "Java",
919
+ ".cpp": "C++",
920
+ ".c": "C",
921
+ ".cs": "C#",
922
+ ".php": "PHP",
923
+ ".rb": "Ruby",
924
+ ".go": "Go",
925
+ ".rs": "Rust",
926
+ ".swift": "Swift",
927
+ ".kt": "Kotlin",
928
+ ".ts": "TypeScript",
929
+ ".html": "HTML",
930
+ ".css": "CSS",
931
+ ".sql": "SQL",
932
+ ".r": "R",
933
+ ".m": "Objective-C/MATLAB",
934
+ ".h": "C/C++ Header",
935
+ ".hpp": "C++ Header",
936
+ ".jsx": "React JSX",
937
+ ".tsx": "React TSX",
938
+ ".vue": "Vue.js",
939
+ ".scala": "Scala",
940
+ ".pl": "Perl",
941
+ ".sh": "Shell Script",
942
+ ".bash": "Bash Script",
943
+ ".ps1": "PowerShell",
944
+ ".yaml": "YAML",
945
+ ".yml": "YAML",
946
+ ".json": "JSON",
947
+ ".xml": "XML",
948
+ ".toml": "TOML",
949
+ ".ini": "INI"
950
+ }
951
+ return language_map.get(file_extension.lower(), "Unknown")
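+ # Example: detect_language(".py") returns "Python"; unrecognized extensions map to "Unknown".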
952
+
953
+ def analyze_code(code_file):
954
+ """Analyze code files and provide insights"""
955
+ if code_file is None:
956
+ return "No file uploaded. Please upload a code file to analyze."
957
+
958
+ try:
959
+ # Get file extension
960
+ file_extension = os.path.splitext(code_file.name)[1]
961
+ language = detect_language(file_extension)
962
+
963
+ # Read the file content
964
+ content = code_file.read().decode('utf-8', errors='ignore')
965
+
966
+ # Basic code metrics
967
+ total_lines = len(content.splitlines())
968
+ blank_lines = len([line for line in content.splitlines() if not line.strip()])
969
+ code_lines = total_lines - blank_lines
970
+
971
+ # Calculate complexity metrics
972
+ complexity_metrics = calculate_complexity(content, language)
973
+
974
+ # Generate analysis using LLM
975
+ analysis_prompt = f"""Analyze this {language} code and provide insights about:
976
+ 1. Code structure and organization
977
+ 2. Potential improvements or best practices
978
+ 3. Security considerations
979
+ 4. Performance implications
980
+ 5. Maintainability factors
981
+
982
+ Code metrics:
983
+ - Total lines: {total_lines}
984
+ - Code lines: {code_lines}
985
+ - Blank lines: {blank_lines}
986
+ {complexity_metrics}
987
+
988
+ First 1000 characters of code:
989
+ {content[:1000]}...
990
+ """
991
+
992
+ completion = client.chat.completions.create(
993
+ model="llama3-70b-8192",
994
+ messages=[
995
+ {"role": "system", "content": "You are an expert code reviewer and technical architect."},
996
+ {"role": "user", "content": analysis_prompt}
997
+ ],
998
+ temperature=0.3,
999
+ max_tokens=1500
1000
+ )
1001
+
1002
+ # Format the analysis
1003
+ analysis = f"""## Code Analysis Report
1004
+
1005
+ **File Type:** {language}
1006
+
1007
+ ### Code Metrics
1008
+ - Total Lines: {total_lines}
1009
+ - Code Lines: {code_lines}
1010
+ - Blank Lines: {blank_lines}
1011
+
1012
+ ### Complexity Analysis
1013
+ {complexity_metrics}
1014
+
1015
+ ### Expert Analysis
1016
+ {completion.choices[0].message.content}
1017
+
1018
+ ### Recommendations
1019
+ 1. Consider using a linter specific to {language}
1020
+ 2. Review the security considerations mentioned above
1021
+ 3. Consider automated testing to validate the code
1022
+ 4. Document any complex algorithms or business logic
1023
+ """
1024
+ return analysis
1025
+
1026
+ except Exception as e:
1027
+ return f"Error analyzing code: {str(e)}\n\nPlease ensure the file is properly formatted and encoded."
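+ # Note: analyze_code assumes `code_file` is a file-like object exposing .name and .read();
+ # a raw bytes payload (e.g. from a gr.File component with type="binary") would instead need
+ # to be decoded directly and its extension taken from the upload metadata.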
1028
+
1029
+ def calculate_complexity(content, language):
1030
+ """Calculate various complexity metrics based on the language"""
1031
+ try:
1032
+ # Count function/method definitions
1033
+ function_patterns = {
1034
+ "Python": r"def\s+\w+\s*\(",
1035
+ "JavaScript": r"function\s+\w+\s*\(|const\s+\w+\s*=\s*\([^)]*\)\s*=>",
1036
+ "Java": r"(public|private|protected)?\s*\w+\s+\w+\s*\([^)]*\)\s*\{",
1037
+ "C++": r"\w+\s+\w+\s*\([^)]*\)\s*\{",
1038
+ }
1039
+
1040
+ pattern = function_patterns.get(language, r"\w+\s+\w+\s*\([^)]*\)")
1041
+ function_count = len(re.findall(pattern, content))
1042
+
1043
+ # Calculate cyclomatic complexity (rough estimate)
1044
+ decision_patterns = [
1045
+ r"\bif\b",
1046
+ r"\bwhile\b",
1047
+ r"\bfor\b",
1048
+ r"\bcase\b",
1049
+ r"\bcatch\b",
1050
+ r"\b&&\b",
1051
+ r"\b\|\|\b"
1052
+ ]
1053
+
1054
+ decision_points = sum(len(re.findall(p, content)) for p in decision_patterns)
1055
+
1056
+ # Estimate maintainability
1057
+ avg_line_length = sum(len(line) for line in content.splitlines()) / len(content.splitlines()) if content.splitlines() else 0
1058
+
1059
+ return f"""**Complexity Metrics:**
1060
+ - Estimated Function Count: {function_count}
1061
+ - Decision Points: {decision_points}
1062
+ - Average Line Length: {avg_line_length:.2f} characters
1063
+ - Cyclomatic Complexity Estimate: {decision_points + 1}
1064
+ """
1065
+ except Exception as e:
1066
+ return f"Error calculating complexity: {str(e)}"
1067
+
1068
+ def update_status_with_animation(status):
1069
+ return f"""
1070
+ <div class="status-message">
1071
+ <div class="loading-container">
1072
+ <div class="loading-bar"></div>
1073
+ </div>
1074
+ > {status}
1075
+ </div>
1076
+ """
1077
+
1078
+ # Update the analysis results display
1079
+ def format_analysis_results(analysis):
1080
+ return f"""
1081
+ <div class="analysis-container">
1082
+ <div class="analysis-header">> ANALYSIS COMPLETE</div>
1083
+ {analysis}
1084
+ <div class="loading-container">
1085
+ <div class="loading-bar"></div>
1086
+ </div>
1087
+ </div>
1088
+ """
1089
+
1090
+ def format_code_metrics(metrics):
1091
+ return f"""
1092
+ <div class="metric-card">
1093
+ <div style="color: var(--neon-yellow);">SYSTEM METRICS</div>
1094
+ <div style="margin-top: 10px;">
1095
+ {metrics}
1096
+ </div>
1097
+ </div>
1098
+ """
1099
+
1100
+ # Add cyberpunk UI sound effects
1101
+ def play_interface_sound(sound_type):
1102
+ sounds = {
1103
+ "hover": "hover.mp3",
1104
+ "click": "click.mp3",
1105
+ "success": "success.mp3",
1106
+ "error": "error.mp3"
1107
+ }
1108
+ return gr.Audio(value=sounds.get(sound_type), autoplay=True, visible=False)
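+ # Note: assumes hover.mp3, click.mp3, success.mp3 and error.mp3 are available in the
+ # working directory (they are not part of this commit, which only touches app.py).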
1109
+
1110
+ # Create the Gradio interface with advanced cyberpunk styling
1111
+ def create_cyberpunk_interface():
1112
+ css = compile_scss()
1113
+
1114
+ with gr.Blocks(css=css, head=NEURAL_JS) as demo:
1115
  current_session_id = gr.State(None)
1116
  pdf_state = gr.State({"page_images": [], "total_pages": 0, "total_words": 0})
1117
+ excel_state = gr.State({"data_preview": "", "total_sheets": 0, "total_rows": 0})
1118
+ file_type = gr.State("none")
1119
+ audio_status = gr.State("Ready")
1120
+
1121
  gr.HTML("""
1122
+ <div class="neural-interface">
1123
+ <div class="neural-interface__header">TECH-VISION_v3.0</div>
1124
+ <div class="neural-status">
1125
+ <div class="neural-status__indicator"></div>
1126
+ <div class="neural-status__text">SYSTEM ONLINE</div>
1127
+ </div>
1128
  </div>
1129
  """)
1130
+ with gr.Row(elem_classes="neural-grid"):
1131
  with gr.Column(scale=1, min_width=300):
1132
+ with gr.Tabs():
1133
+ with gr.TabItem("[SYS:SCAN] Code Analysis"):
1134
+ gr.HTML("""
1135
+ <div class="upload-container">
1136
+ <div style="color: var(--neon-blue); margin-bottom: 10px;">
1137
+ > INITIATE CODE SCAN
1138
+ </div>
1139
+ """)
1140
+ code_file = gr.File(
1141
+ label="UPLOAD SOURCE CODE",
1142
+ file_types=[".py", ".js", ".java", ".cpp", ".c", ".cs", ".php", ".rb",
1143
+ ".go", ".rs", ".swift", ".kt", ".ts", ".html", ".css",
1144
+ ".sql", ".r", ".m", ".h", ".hpp", ".jsx", ".tsx",
1145
+ ".vue", ".scala", ".pl", ".sh", ".bash", ".ps1",
1146
+ ".yaml", ".yml", ".json", ".xml", ".toml", ".ini"],
1147
+ type="binary"
1148
+ )
1149
+ gr.HTML("</div>")
1150
+ code_analyze_btn = gr.Button("INITIATE ANALYSIS", elem_classes="primary-btn")
1151
+
1152
+ with gr.TabItem("PDF"):
1153
+ pdf_file = gr.File(label="Upload PDF Document", file_types=[".pdf"], type="binary")
1154
+ pdf_upload_button = gr.Button("Process PDF", variant="primary")
1155
+
1156
+ with gr.TabItem("Excel"):
1157
+ excel_file = gr.File(label="Upload Excel File", file_types=[".xlsx", ".xls"], type="binary")
1158
+ excel_upload_button = gr.Button("Process Excel", variant="primary")
1159
+
1160
+ with gr.TabItem("Image"):
1161
+ image_input = gr.File(
1162
+ label="Upload Image",
1163
+ file_types=["image"],
1164
+ type="filepath"
1165
+ )
1166
+ analyze_btn = gr.Button("Analyze Image")
1167
+
1168
+ file_status = gr.Markdown("No file uploaded yet")
1169
+
1170
+ # Model selector
1171
  model_dropdown = gr.Dropdown(
1172
  choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
1173
  value="llama3-70b-8192",
1174
  label="Select Groq Model"
1175
  )
1176
+
1177
  with gr.Column(scale=2, min_width=600):
1178
  with gr.Tabs():
1179
  with gr.TabItem("PDF Viewer"):
1180
  with gr.Column(elem_classes="pdf-viewer-container"):
1181
  page_slider = gr.Slider(minimum=1, maximum=1, step=1, label="Page Number", value=1)
1182
  pdf_image = gr.Image(label="PDF Page", type="pil", elem_classes="pdf-viewer-image")
1183
+ pdf_stats = gr.Markdown("No PDF uploaded yet", elem_classes="stats-box")
1184
 
1185
+ with gr.TabItem("Excel Viewer"):
1186
+ excel_preview = gr.HTML(label="Excel Preview", elem_classes="file-preview")
1187
+ excel_stats = gr.Markdown("No Excel file uploaded yet", elem_classes="stats-box")
1188
 
1189
+ with gr.TabItem("Image Analysis"):
1190
+ image_preview = gr.Image(label="Image Preview", type="pil")
1191
+ image_analysis_results = gr.Markdown("Upload an image and click 'Analyze Image' to see analysis results")
1192
+
1193
+ with gr.TabItem("Code Analysis Results"):
1194
+ analysis_results = gr.Markdown("Upload a code file and click 'Analyze Code' to see analysis results")
1195
+ with gr.Row():
1196
+ copy_btn = gr.Button("📋 Copy Analysis")
1197
+ export_btn = gr.Button("📥 Export Report")
1198
+
1199
+ # Audio visualization elements
1200
+ with gr.Row(elem_classes="container"):
1201
+ with gr.Column():
1202
+ audio_vis = gr.HTML("""
1203
+ <div class="audio-visualization">
1204
+ <div class="audio-bar" style="height: 5px;"></div>
1205
+ <div class="audio-bar" style="height: 12px;"></div>
1206
+ <div class="audio-bar" style="height: 18px;"></div>
1207
+ <div class="audio-bar" style="height: 15px;"></div>
1208
+ <div class="audio-bar" style="height: 10px;"></div>
1209
+ <div class="audio-bar" style="height: 20px;"></div>
1210
+ <div class="audio-bar" style="height: 14px;"></div>
1211
+ <div class="audio-bar" style="height: 8px;"></div>
1212
+ </div>
1213
+ """, visible=False)
1214
+ audio_status_display = gr.Markdown("", elem_classes="audio-status")
1215
 
1216
+ # Chat interface
1217
  with gr.Row(elem_classes="container"):
1218
  with gr.Column(scale=2, min_width=600):
1219
+ chatbot = gr.Chatbot(
1220
+ height=400,
1221
+ show_copy_button=True,
1222
+ elem_classes="chat-container",
1223
+ type="tuples" # Match the (user, bot) tuple format produced by generate_response
1224
+ )
1225
  with gr.Row():
1226
+ msg = gr.Textbox(
1227
+ show_label=False,
1228
+ placeholder="Ask about your document or click the microphone to speak...",
1229
+ scale=5
1230
+ )
1231
+ voice_btn = gr.Button("🎤", elem_classes="voice-btn")
1232
  send_btn = gr.Button("Send", scale=1)
1233
+
1234
+ with gr.Row(elem_classes="audio-controls"):
1235
+ clear_btn = gr.Button("Clear Conversation")
1236
+ speak_btn = gr.Button("🔊 Speak Response", elem_classes="speak-btn")
1237
+ audio_player = gr.Audio(label="Response Audio", type="filepath", visible=False)
1238
 
1239
+ # Event Handlers for PDF processing
1240
+ pdf_upload_button.click(
1241
+ lambda x: ("pdf", x),
1242
+ inputs=[pdf_file],
1243
+ outputs=[file_type, file_status]
1244
+ ).then(
1245
  process_pdf,
1246
  inputs=[pdf_file],
1247
+ outputs=[current_session_id, file_status, pdf_state]
1248
  ).then(
1249
  update_pdf_viewer,
1250
  inputs=[pdf_state],
1251
+ outputs=[page_slider, pdf_image, pdf_stats]
1252
  )
1253
 
1254
+ # Event Handlers for Excel processing
1255
+ def update_excel_preview(state):
1256
+ if not state:
1257
+ return "", "No Excel file uploaded yet"
1258
+ preview = state.get("data_preview", "")
1259
+ sheets = state.get("total_sheets", 0)
1260
+ rows = state.get("total_rows", 0)
1261
+ stats = f"**Excel Statistics:**\nSheets: {sheets}\nTotal Rows: {rows}"
1262
+ return preview, stats
1263
+
1264
+ excel_upload_button.click(
1265
+ lambda x: ("excel", x),
1266
+ inputs=[excel_file],
1267
+ outputs=[file_type, file_status]
1268
+ ).then(
1269
+ process_excel,
1270
+ inputs=[excel_file],
1271
+ outputs=[current_session_id, file_status, excel_state]
1272
+ ).then(
1273
+ update_excel_preview,
1274
+ inputs=[excel_state],
1275
+ outputs=[excel_preview, excel_stats]
1276
+ )
1277
+
1278
+ # Event Handlers for Image Analysis
1279
+ analyze_btn.click(
1280
+ lambda x: ("image", x),
1281
+ inputs=[image_input],
1282
+ outputs=[file_type, file_status]
1283
+ ).then(
1284
+ analyze_image,
1285
+ inputs=[image_input],
1286
+ outputs=[image_analysis_results]
1287
+ ).then(
1288
+ lambda x: Image.open(x) if x else None,
1289
+ inputs=[image_input],
1290
+ outputs=[image_preview]
1291
+ )
1292
+
1293
+ # Event Handlers for Code Analysis
1294
+ code_analyze_btn.click(
1295
+ lambda: update_status_with_animation("Analyzing code..."),
1296
+ inputs=[],
1297
+ outputs=[file_status]
1298
+ ).then(
1299
+ analyze_code,
1300
+ inputs=[code_file],
1301
+ outputs=[analysis_results]
1302
+ ).then(
1303
+ format_analysis_results,
1304
+ inputs=[analysis_results],
1305
+ outputs=[analysis_results]
1306
+ )
1307
+
1308
+ # Chat message handling
1309
  msg.submit(
1310
  generate_response,
1311
  inputs=[msg, current_session_id, model_dropdown, chatbot],
 
1318
  outputs=[chatbot]
1319
  ).then(lambda: "", None, [msg])
1320
 
1321
+ # Speech-to-text: transcribe microphone input into the message box
1322
+ voice_btn.click(
1323
+ speech_to_text,
1324
+ inputs=[],
1325
+ outputs=[msg]
1326
  )
1327
 
1328
+ # Improved text-to-speech with visual feedback
1329
+ speak_btn.click(
1330
+ text_to_speech,
1331
+ inputs=[audio_status, chatbot],
1332
+ outputs=[audio_player]
1333
+ ).then(
1334
+ lambda x: gr.update(visible=True) if x else gr.update(visible=False),
1335
+ inputs=[audio_player],
1336
+ outputs=[audio_player]
1337
+ )
1338
+
1339
+ # Page navigation for PDF
1340
  page_slider.change(
1341
  update_image,
1342
  inputs=[page_slider, pdf_state],
1343
  outputs=[pdf_image]
1344
  )
1345
 
1346
+ # Clear conversation and reset UI
1347
+ clear_btn.click(
1348
+ lambda: (
1349
+ [], None, "No file uploaded yet",
1350
+ {"page_images": [], "total_pages": 0, "total_words": 0},
1351
+ {"data_preview": "", "total_sheets": 0, "total_rows": 0},
1352
+ "none", 0, None, "No PDF uploaded yet", "",
1353
+ "No Excel file uploaded yet", None,
1354
+ "Upload an image and click 'Analyze Image' to see results", None,
1355
+ gr.update(visible=False), "Ready"
1356
+ ),
1357
+ None,
1358
+ [chatbot, current_session_id, file_status, pdf_state, excel_state,
1359
+ file_type, page_slider, pdf_image, pdf_stats, excel_preview,
1360
+ excel_stats, image_preview, image_analysis_results, audio_player,
1361
+ audio_vis, audio_status_display]
 
1362
  )
1363
 
1364
+ # Add footer with creator attribution
1365
+ gr.HTML("""
1366
+ <div style="text-align: center; margin-top: 20px; padding: 10px; color: #666; font-size: 0.8rem; border-top: 1px solid #eee;">
1367
+ Created by Calvin Allen Crawford
1368
+ </div>
1369
+ """)
1370
+
1371
+ return demo
1372
 
1373
  # Launch the app
1374
  if __name__ == "__main__":
1375
+ demo = create_cyberpunk_interface()
1376
  demo.launch()