team11aiml committed · verified
Commit 6685d41 · 1 Parent(s): e61aa87

Update app.py

Files changed (1):
  1. app.py +236 -86
app.py CHANGED
@@ -1,87 +1,237 @@
- import gradio as gr
- from modules.code_assistant import CodeAssistant
- from modules.docs_assistant import DocsAssistant
- from modules.pdf_assistant import PDFAssistant
- from modules.utils import load_css
-
- def create_app():
-     # Initialize assistants
-     code_assistant = CodeAssistant()
-     docs_assistant = DocsAssistant()
-     pdf_assistant = PDFAssistant()
-
-     with gr.Blocks(css=load_css()) as demo:
-         gr.Markdown("# Enterprise RAG Assistant")
-
-         with gr.Tabs() as tabs:
-             # Code Assistant Tab
-             with gr.Tab("Code Assistant", id=1):
-                 with gr.Row():
-                     with gr.Column():
-                         code_input = gr.Textbox(
-                             label="Ask coding questions",
-                             placeholder="Enter your coding question...",
-                             lines=3
-                         )
-                         code_submit = gr.Button("Get Code Solution")
-                         code_output = gr.Code(
-                             label="Code Output",
-                             language="python"
-                         )
-
-             # Documentation Assistant Tab
-             with gr.Tab("Documentation Assistant", id=2):
-                 with gr.Row():
-                     with gr.Column():
-                         docs_input = gr.Textbox(
-                             label="Documentation Query",
-                             placeholder="Ask about technical documentation...",
-                             lines=3
-                         )
-                         docs_file = gr.File(
-                             label="Upload Documentation",
-                             file_types=[".pdf", ".txt", ".md"]
-                         )
-                         docs_submit = gr.Button("Search Documentation")
-                         docs_output = gr.Markdown()
-
-             # PDF RAG Assistant Tab
-             with gr.Tab("PDF Assistant", id=3):
-                 with gr.Row():
-                     with gr.Column():
-                         pdf_file = gr.File(
-                             label="Upload PDF",
-                             file_types=[".pdf"]
-                         )
-                         pdf_query = gr.Textbox(
-                             label="Ask about the PDF",
-                             placeholder="Enter your question about the PDF...",
-                             lines=3
-                         )
-                         pdf_submit = gr.Button("Get Answer")
-                         pdf_output = gr.Markdown()
-
-         # Event handlers
-         code_submit.click(
-             code_assistant.generate_response,
-             inputs=[code_input],
-             outputs=[code_output]
-         )
-
-         docs_submit.click(
-             docs_assistant.search_docs,
-             inputs=[docs_input, docs_file],
-             outputs=[docs_output]
-         )
-
-         pdf_submit.click(
-             pdf_assistant.answer_query,
-             inputs=[pdf_query, pdf_file],
-             outputs=[pdf_output]
-         )
-
-     return demo
-
- if __name__ == "__main__":
-     app = create_app()
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from PyPDF2 import PdfReader
+
+ # Verify GPU availability
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+ class UnifiedAssistant:
+     def __init__(self):
+         # Initialize Code Assistant (Qwen)
+         print("Loading Code Assistant Model...")
+         self.code_model_name = "Qwen/Qwen2.5-Coder-32B-Instruct"
+         self.code_tokenizer = AutoTokenizer.from_pretrained(self.code_model_name)
+         self.code_model = AutoModelForCausalLM.from_pretrained(
+             self.code_model_name,
+             torch_dtype=torch.bfloat16,
+             device_map="auto"
+         )
+
+         # Initialize Docs Assistant (DocGPT)
+         print("Loading Documentation Assistant Model...")
+         self.docs_model_name = "Arc53/docsgpt-40b-falcon"
+         self.docs_tokenizer = AutoTokenizer.from_pretrained(self.docs_model_name)
+         self.docs_model = AutoModelForCausalLM.from_pretrained(
+             self.docs_model_name,
+             torch_dtype=torch.bfloat16,
+             device_map="auto"
+         )
+
+         # Initialize PDF Assistant (Llama)
+         print("Loading PDF Assistant Model...")
+         self.pdf_model_name = "meta-llama/Llama-3.3-70B-Instruct"
+         self.pdf_tokenizer = AutoTokenizer.from_pretrained(self.pdf_model_name)
+         self.pdf_model = AutoModelForCausalLM.from_pretrained(
+             self.pdf_model_name,
+             torch_dtype=torch.bfloat16,
+             device_map="auto"
+         )
+
+     @spaces.GPU
+     def process_code_query(self, query):
+         try:
+             inputs = self.code_tokenizer(query, return_tensors="pt").to(self.code_model.device)
+             outputs = self.code_model.generate(
+                 **inputs,
+                 max_length=2048,
+                 temperature=0.7,
+                 top_p=0.95,
+                 do_sample=True
+             )
+             return self.code_tokenizer.decode(outputs[0], skip_special_tokens=True)
+         except Exception as e:
+             return f"Error processing code query: {str(e)}"
+
+     @spaces.GPU
+     def process_docs_query(self, query, doc_file):
+         try:
+             if doc_file is None:
+                 return "Please upload a documentation file."
+
+             doc_content = self._read_file_content(doc_file)
+             prompt = f"Documentation: {doc_content}\nQuery: {query}"
+
+             inputs = self.docs_tokenizer(prompt, return_tensors="pt").to(self.docs_model.device)
+             outputs = self.docs_model.generate(
+                 **inputs,
+                 max_length=1024,
+                 temperature=0.3,
+                 top_p=0.95
+             )
+             return self.docs_tokenizer.decode(outputs[0], skip_special_tokens=True)
+         except Exception as e:
+             return f"Error processing documentation query: {str(e)}"
+
+     @spaces.GPU
+     def process_pdf_query(self, query, pdf_file):
+         try:
+             if pdf_file is None:
+                 return "Please upload a PDF file."
+
+             pdf_text = self._extract_pdf_text(pdf_file)
+             prompt = f"Context from PDF: {pdf_text}\nQuestion: {query}"
+
+             inputs = self.pdf_tokenizer(prompt, return_tensors="pt").to(self.pdf_model.device)
+             outputs = self.pdf_model.generate(
+                 **inputs,
+                 max_length=1024,
+                 temperature=0.3,
+                 top_p=0.95
+             )
+             return self.pdf_tokenizer.decode(outputs[0], skip_special_tokens=True)
+         except Exception as e:
+             return f"Error processing PDF query: {str(e)}"
+
+     def _read_file_content(self, file):
+         content = ""
+         if file.name.endswith('.pdf'):
+             content = self._extract_pdf_text(file)
+         else:
+             content = file.read().decode('utf-8')
+         return content
+
+     def _extract_pdf_text(self, pdf_file):
+         reader = PdfReader(pdf_file)
+         text = ""
+         for page in reader.pages:
+             text += page.extract_text() + "\n"
+         return text
+
+ # Custom CSS for better UI
+ css = """
+ .gradio-container {
+     font-family: 'Inter', sans-serif;
+     max-width: 1200px !important;
+     margin: auto;
+ }
+ .tabs {
+     background: #f8f9fa;
+     border-radius: 10px;
+     padding: 20px;
+     margin-bottom: 20px;
+ }
+ .input-box {
+     border: 1px solid #e0e0e0;
+     border-radius: 8px;
+     padding: 12px;
+ }
+ .button {
+     background: #2d63c8 !important;
+     color: white !important;
+     border-radius: 6px !important;
+     padding: 10px 20px !important;
+     transition: all 0.3s ease !important;
+ }
+ .button:hover {
+     background: #1e4a9d !important;
+     transform: translateY(-1px) !important;
+ }
+ .output-box {
+     background: #ffffff;
+     border: 1px solid #e0e0e0;
+     border-radius: 8px;
+     padding: 16px;
+     margin-top: 12px;
+ }
+ """
+
+ def create_app():
+     print("Initializing RAG Assistant...")
+     assistant = UnifiedAssistant()
+
+     with gr.Blocks(css=css) as demo:
+         gr.Markdown("""
+         # 🚀 Enterprise RAG Assistant
+         ### Your AI-Powered Documentation & Code Assistant
+
+         This application combines three powerful AI models:
+         - 💻 Code Assistant (Qwen2.5-Coder-32B)
+         - 📚 Documentation Helper (DocGPT-40B)
+         - 📑 PDF Analyzer (Llama-3.3-70B)
+         """)
+
+         with gr.Tabs() as tabs:
+             # Code Assistant Tab
+             with gr.Tab("💻 Code Assistant", id=1):
+                 with gr.Row():
+                     with gr.Column():
+                         code_input = gr.Textbox(
+                             label="Ask coding questions",
+                             placeholder="Enter your coding question...",
+                             lines=3
+                         )
+                         code_submit = gr.Button("🔍 Get Code Solution", variant="primary")
+                         code_output = gr.Code(
+                             label="Code Output",
+                             language="python"
+                         )
+
+             # Documentation Assistant Tab
+             with gr.Tab("📚 Documentation Assistant", id=2):
+                 with gr.Row():
+                     with gr.Column():
+                         docs_input = gr.Textbox(
+                             label="Documentation Query",
+                             placeholder="Ask about technical documentation...",
+                             lines=3
+                         )
+                         docs_file = gr.File(
+                             label="Upload Documentation",
+                             file_types=[".pdf", ".txt", ".md"]
+                         )
+                         docs_submit = gr.Button("🔍 Search Documentation", variant="primary")
+                         docs_output = gr.Markdown()
+
+             # PDF RAG Assistant Tab
+             with gr.Tab("📑 PDF Assistant", id=3):
+                 with gr.Row():
+                     with gr.Column():
+                         pdf_file = gr.File(
+                             label="Upload PDF",
+                             file_types=[".pdf"]
+                         )
+                         pdf_query = gr.Textbox(
+                             label="Ask about the PDF",
+                             placeholder="Enter your question about the PDF...",
+                             lines=3
+                         )
+                         pdf_submit = gr.Button("🔍 Get Answer", variant="primary")
+                         pdf_output = gr.Markdown()
+
+         # Event handlers
+         code_submit.click(
+             assistant.process_code_query,
+             inputs=[code_input],
+             outputs=[code_output]
+         )
+
+         docs_submit.click(
+             assistant.process_docs_query,
+             inputs=[docs_input, docs_file],
+             outputs=[docs_output]
+         )
+
+         pdf_submit.click(
+             assistant.process_pdf_query,
+             inputs=[pdf_query, pdf_file],
+             outputs=[pdf_output]
+         )
+
+     return demo
+
+ if __name__ == "__main__":
+     app = create_app()
      app.launch()
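
For reference (not part of the commit), the PyPDF2 extraction step that _extract_pdf_text relies on can be exercised on its own. The sketch below is illustrative only; "sample.pdf" is a hypothetical placeholder path, not a file from this repository.

from PyPDF2 import PdfReader

def extract_pdf_text(path: str) -> str:
    # Concatenate the extracted text of every page; extract_text() can return
    # None for image-only pages, hence the "or ''" fallback.
    reader = PdfReader(path)
    return "\n".join(page.extract_text() or "" for page in reader.pages)

if __name__ == "__main__":
    # "sample.pdf" is a placeholder; point this at any local PDF to try it.
    print(extract_pdf_text("sample.pdf")[:500])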