latterworks committed on
Commit
44d06c0
·
verified ·
1 Parent(s): 059111e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +485 -687
app.py CHANGED
@@ -1,766 +1,564 @@
1
- # app.py - Ollama Scanner Application
2
-
3
- import asyncio
4
- import bcrypt
5
- import gradio as gr
6
- import logging
7
  import os
 
8
  import requests
9
- import shodan
10
- import time
11
- from datasets import load_dataset, Dataset
12
  from huggingface_hub import HfApi, login
13
- import os
14
- from typing import List, Dict, Any, Optional, Tuple
15
- import pandas as pd
 
 
 
16
 
17
  # Configure logging
18
  logging.basicConfig(
19
  level=logging.INFO,
20
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
21
- handlers=[logging.StreamHandler()]
22
  )
23
- logger = logging.getLogger("ollama_scanner")
24
 
25
- # Dataset constants
26
- DATASET_NAME = "latterworks/llama_checker_results"
27
- DATASET_COLUMNS = ["ip", "port", "country", "region", "org", "models"]
28
-
29
- # Security helper functions
30
- def verify_password(entered_password: str, stored_password_hash: str) -> bool:
31
- """Verify a password against its hash using bcrypt."""
32
  try:
33
- # Verify password
34
- return bcrypt.checkpw(
35
- entered_password.encode('utf-8'),
36
- stored_password_hash.encode('utf-8')
37
- )
38
  except Exception as e:
39
- logger.exception("Password verification error:")
40
- return False
41
 
42
- def hash_password(password: str) -> str:
43
- """Hash a password using bcrypt."""
44
- return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')
 
 
 
45
 
46
- # Dataset helper functions
47
- def get_dataset() -> Dataset:
48
- """Get the dataset with Ollama instances and models."""
 
 
 
 
 
 
 
 
 
49
  try:
50
- # Try to load the existing dataset
51
- hf_token = os.getenv("HF_TOKEN")
52
- dataset = load_dataset(DATASET_NAME, use_auth_token=hf_token)
53
- logger.info(f"Loaded existing dataset: {DATASET_NAME}")
54
  return dataset["train"]
55
  except Exception as e:
56
- logger.warning(f"Could not load existing dataset: {e}")
57
- # Create a new empty dataset
58
- empty_data = {
59
- 'ip': [],
60
- 'port': [],
61
- 'country': [],
62
- 'region': [],
63
- 'org': [],
64
- 'models': []
65
- }
66
- dataset = Dataset.from_dict(empty_data)
67
- logger.info(f"Created new empty dataset")
68
- return dataset
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
- def push_dataset(dataset: Dataset) -> None:
71
- """Push the dataset to the Hub."""
72
- try:
73
- hf_token = os.getenv("HF_TOKEN")
74
- dataset.push_to_hub(DATASET_NAME, token=hf_token)
75
- logger.info(f"Pushed dataset to Hub: {DATASET_NAME}")
76
- except Exception as e:
77
- logger.error(f"Failed to push dataset to Hub: {e}")
78
-
79
- def find_instance_index(dataset: Dataset, ip: str, port: int) -> Optional[int]:
80
- """Find the index of an instance in the dataset."""
81
- for i, item in enumerate(dataset):
82
- if item['ip'] == ip and item['port'] == port:
83
- return i
84
- return None
85
-
86
- def add_or_update_instance(dataset: Dataset, instance_data: Dict[str, Any],
87
- update_models: bool = False) -> Dataset:
88
- """Add a new instance or update an existing one in the dataset."""
89
- ip = instance_data['ip']
90
- port = instance_data['port']
91
 
92
- # Convert to dictionaries for manipulation
93
- items = [dict(item) for item in dataset]
 
 
 
 
 
 
 
 
94
 
95
- # Check if instance exists
96
- instance_idx = find_instance_index(dataset, ip, port)
 
 
 
 
97
 
98
- if instance_idx is not None:
99
- # Update existing instance
100
- if update_models or 'models' not in instance_data:
101
- # Keep existing models if not updating models or models not provided
102
- instance_data['models'] = items[instance_idx].get('models', [])
103
-
104
- items[instance_idx] = instance_data
105
  else:
106
- # Add new instance
107
- if 'models' not in instance_data:
108
- instance_data['models'] = []
109
- items.append(instance_data)
110
-
111
- # Convert back to dataset
112
- new_dataset = Dataset.from_dict({
113
- 'ip': [item['ip'] for item in items],
114
- 'port': [item['port'] for item in items],
115
- 'country': [item.get('country', '') for item in items],
116
- 'region': [item.get('region', '') for item in items],
117
- 'org': [item.get('org', '') for item in items],
118
- 'models': [item.get('models', []) for item in items]
119
- })
120
-
121
- return new_dataset
122
 
123
- def update_instance_models(dataset: Dataset, ip: str, port: int,
124
- models: List[Dict[str, Any]]) -> Dataset:
125
- """Update the models for an existing instance in the dataset."""
126
- instance_idx = find_instance_index(dataset, ip, port)
127
- if instance_idx is None:
128
- logger.error(f"Instance {ip}:{port} not found in dataset")
129
- return dataset
130
 
131
- # Convert to dictionaries for manipulation
132
- items = [dict(item) for item in dataset]
133
-
134
- # Update models
135
- items[instance_idx]['models'] = models
136
-
137
- # Convert back to dataset
138
- new_dataset = Dataset.from_dict({
139
- 'ip': [item['ip'] for item in items],
140
- 'port': [item['port'] for item in items],
141
- 'country': [item.get('country', '') for item in items],
142
- 'region': [item.get('region', '') for item in items],
143
- 'org': [item.get('org', '') for item in items],
144
- 'models': [item.get('models', []) for item in items]
145
- })
146
 
147
- return new_dataset
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
- # Ollama and Shodan functions
150
- async def check_ollama_endpoint(dataset: Dataset, ip: str, port: int) -> Dataset:
151
- """Check an Ollama endpoint and update the dataset with model information."""
 
 
 
 
 
 
 
 
 
 
 
152
  url = f"http://{ip}:{port}/api/tags"
153
 
 
 
154
  try:
155
- # Send request to Ollama API
156
- response = requests.get(url, timeout=5)
157
  response.raise_for_status()
158
 
159
- # Parse response
160
  data = response.json()
161
 
162
- # Extract models
163
- models = []
164
- for model_data in data.get('models', []):
165
- model = {
166
- 'name': model_data.get('name', ''),
167
- 'family': model_data.get('details', {}).get('family', ''),
168
- 'parameter_size': model_data.get('details', {}).get('parameter_size', ''),
169
- 'quantization_level': model_data.get('details', {}).get('quantization_level', ''),
170
- 'digest': model_data.get('digest', ''),
171
- 'modified_at': model_data.get('modified_at', ''),
172
- 'size': model_data.get('size', 0)
173
- }
174
- models.append(model)
175
-
176
- # Update dataset
177
- updated_dataset = update_instance_models(dataset, ip, port, models)
178
- logger.info(f"Updated models for {ip}:{port} - found {len(models)} models")
179
- return updated_dataset
180
-
 
 
181
  except requests.exceptions.RequestException as e:
182
- logger.error(f"Network error checking {ip}:{port}: {e}")
183
- # Update with empty models list to indicate connection failed
184
- updated_dataset = update_instance_models(dataset, ip, port, [])
185
- return updated_dataset
186
-
187
  except ValueError as e:
188
  logger.error(f"Invalid JSON from {ip}:{port}: {e}")
189
- # Update with empty models list to indicate invalid JSON
190
- updated_dataset = update_instance_models(dataset, ip, port, [])
191
- return updated_dataset
192
-
193
  except Exception as e:
194
- logger.exception(f"Unexpected error checking {ip}:{port}:")
195
- # Update with empty models list to indicate unexpected error
196
- updated_dataset = update_instance_models(dataset, ip, port, [])
197
- return updated_dataset
198
-
199
- async def scan_ollama_instances(progress=None) -> Tuple[str, Dataset]:
200
- """Scan for Ollama instances using Shodan and check each endpoint."""
201
- if progress is None:
202
- progress = gr.Progress()
203
 
204
- # Get secrets
205
- try:
206
- shodan_api_key = os.getenv("SHODAN_API_KEY")
207
- shodan_query = os.getenv("SHODAN_QUERY")
208
-
209
- if not shodan_api_key:
210
- raise ValueError("SHODAN_API_KEY environment variable is not set")
211
- if not shodan_query:
212
- raise ValueError("SHODAN_QUERY environment variable is not set")
213
- except Exception as e:
214
- logger.error(f"Failed to get secrets: {e}")
215
- return "Error: Failed to retrieve secrets", None
216
-
217
- # Initialize Shodan API
218
- api = shodan.Shodan(shodan_api_key)
219
-
220
- try:
221
- # Search for Ollama instances
222
- logger.info(f"Starting Shodan search with query: {shodan_query}")
223
- results = api.search(shodan_query, limit=1000)
224
-
225
- total_results = results['total']
226
- logger.info(f"Found {total_results} results")
227
-
228
- # Get dataset
229
- dataset = get_dataset()
230
-
231
- # Process results
232
- new_instances = 0
233
- updated_instances = 0
234
-
235
- progress(0, desc="Processing Shodan results")
236
- for i, result in enumerate(results['matches']):
237
- progress((i + 1) / len(results['matches']), desc="Processing Shodan results")
238
-
239
- ip = result['ip_str']
240
- port = result.get('port', 11434)
241
-
242
- # Check if already in dataset
243
- instance_idx = find_instance_index(dataset, ip, port)
244
-
245
- # Prepare instance data
246
- instance_data = {
247
- 'ip': ip,
248
- 'port': port,
249
- 'country': result.get('location', {}).get('country_name', ''),
250
- 'region': result.get('location', {}).get('region_name', ''),
251
- 'org': result.get('org', ''),
252
- 'models': []
253
- }
254
-
255
- # Add or update instance
256
- if instance_idx is None:
257
- dataset = add_or_update_instance(dataset, instance_data)
258
- new_instances += 1
259
- else:
260
- dataset = add_or_update_instance(dataset, instance_data, update_models=False)
261
- updated_instances += 1
262
-
263
- # Push initial updates
264
- push_dataset(dataset)
265
-
266
- # Check all instances
267
- progress(0, desc="Checking Ollama endpoints")
268
- instances = [dict(item) for item in dataset]
269
-
270
- for i, instance in enumerate(instances):
271
- progress((i + 1) / len(instances), desc=f"Checking Ollama endpoint {i+1}/{len(instances)}")
272
-
273
- ip = instance['ip']
274
- port = instance['port']
275
-
276
- # Check endpoint
277
- dataset = await check_ollama_endpoint(dataset, ip, port)
278
-
279
- # Push final updates
280
- push_dataset(dataset)
281
-
282
- return f"Scan completed. Found {total_results} Shodan results. Added {new_instances} new instances, updated {updated_instances} existing instances.", dataset
283
-
284
- except shodan.APIError as e:
285
- logger.error(f"Shodan API error: {e}")
286
- return f"Error: Shodan API error - {e}", None
287
-
288
- except Exception as e:
289
- logger.exception("Unexpected error during scan:")
290
- return f"Error: {str(e)}", None
291
 
292
- def filter_and_search_models(
293
- dataset: Dataset,
294
- family: Optional[str] = None,
295
- parameter_size: Optional[str] = None,
296
- name_search: Optional[str] = None,
297
- is_admin: bool = False
298
- ) -> Tuple[pd.DataFrame, List[Dict[str, Any]]]:
299
- """Filter and search models in the dataset."""
300
- # Extract all models from all instances
301
- all_models = []
302
 
303
- for instance in dataset:
304
- ip = instance['ip']
305
- port = instance['port']
306
- country = instance.get('country', '')
307
- region = instance.get('region', '')
308
- org = instance.get('org', '')
309
-
310
- for model in instance.get('models', []):
311
- model_info = {
312
- 'ip': ip,
313
- 'port': port,
314
- 'country': country,
315
- 'region': region,
316
- 'org': org,
317
- 'name': model.get('name', ''),
318
- 'family': model.get('family', ''),
319
- 'parameter_size': model.get('parameter_size', ''),
320
- 'quantization_level': model.get('quantization_level', ''),
321
- 'digest': model.get('digest', ''),
322
- 'modified_at': model.get('modified_at', ''),
323
- 'size_bytes': model.get('size', 0),
324
- 'size_gb': round(model.get('size', 0) / (1024**3), 2) if model.get('size', 0) else 0
325
- }
326
- all_models.append(model_info)
327
 
328
- # Apply filters
329
- filtered_models = all_models
 
 
 
 
 
 
 
 
 
330
 
331
- if family and family != "All":
332
- filtered_models = [m for m in filtered_models if m['family'] == family]
 
 
 
 
 
 
333
 
334
- if parameter_size and parameter_size != "All":
335
- filtered_models = [m for m in filtered_models if m['parameter_size'] == parameter_size]
 
336
 
337
- if name_search:
338
- name_search = name_search.lower()
339
- filtered_models = [m for m in filtered_models if name_search in m['name'].lower()]
 
340
 
341
- # Create DataFrame for display
342
- if filtered_models:
343
- display_columns = ['name', 'family', 'parameter_size', 'quantization_level', 'size_gb']
344
- if is_admin:
345
- display_columns = ['ip', 'port', 'country', 'region'] + display_columns
346
-
347
- df = pd.DataFrame(filtered_models)[display_columns]
348
- else:
349
- display_columns = ['name', 'family', 'parameter_size', 'quantization_level', 'size_gb']
350
- if is_admin:
351
- display_columns = ['ip', 'port', 'country', 'region'] + display_columns
352
-
353
- df = pd.DataFrame(columns=display_columns)
354
-
355
- return df, filtered_models
356
-
357
- def get_unique_values(dataset: Dataset) -> Tuple[List[str], List[str]]:
358
- """Get unique family and parameter size values from the dataset."""
359
- families = set()
360
- parameter_sizes = set()
361
-
362
- for instance in dataset:
363
- for model in instance.get('models', []):
364
- family = model.get('family', '')
365
- parameter_size = model.get('parameter_size', '')
366
-
367
- if family:
368
- families.add(family)
369
- if parameter_size:
370
- parameter_sizes.add(parameter_size)
371
 
372
- return ["All"] + sorted(list(families)), ["All"] + sorted(list(parameter_sizes))
373
-
374
- # Gradio interface functions
375
- def login_submit(password: str) -> Tuple[bool, str, str]:
376
- """Handle admin login."""
377
- try:
378
- stored_password = os.getenv("ADMIN_PASSWORD")
379
-
380
- if not stored_password:
381
- logger.error("ADMIN_PASSWORD environment variable is not set")
382
- return False, "error", "Admin password is not configured. Please contact the administrator."
383
-
384
- # Check if stored password is already hashed
385
- if stored_password.startswith('$2b
386
-
387
- def search_models(
388
- family: str,
389
- parameter_size: str,
390
- name_search: str,
391
- is_admin: bool,
392
- dataset: Dataset
393
- ) -> Tuple[pd.DataFrame, List[Dict[str, Any]]]:
394
- """Search and filter models in the dataset."""
395
- df, details = filter_and_search_models(
396
- dataset,
397
- family=None if family == "All" else family,
398
- parameter_size=None if parameter_size == "All" else parameter_size,
399
- name_search=name_search,
400
- is_admin=is_admin
401
- )
402
- return df, details
403
-
404
- def show_model_details(evt: gr.SelectData, models: List[Dict[str, Any]]) -> Dict[str, Any]:
405
- """Show details for a selected model."""
406
- if not models or evt.index[0] >= len(models):
407
- return {}
408
 
409
- model = models[evt.index[0]]
410
- return model
411
-
412
- # Main Gradio application
413
- def create_app():
414
- with gr.Blocks(title="Ollama Scanner", theme=gr.themes.Soft()) as app:
415
- # State variables
416
- admin_logged_in = gr.State(False)
417
- dataset_state = gr.State(get_dataset())
418
- model_details_state = gr.State([])
419
-
420
- # Header
421
- gr.Markdown("# 🔍 Ollama Scanner")
422
- gr.Markdown("Browse publicly accessible Ollama instances and their models")
423
-
424
- # Login tab
425
- with gr.Tab("Admin Login") as login_tab:
426
- with gr.Group():
427
- gr.Markdown("### Admin Login")
428
- gr.Markdown("Enter the admin password to access administrative features.")
429
-
430
- admin_password = gr.Textbox(
431
- type="password",
432
- label="Admin Password",
433
- placeholder="Enter admin password"
434
- )
435
- login_btn = gr.Button("Login", variant="primary")
436
- login_status = gr.Markdown("")
437
-
438
- # Browse Models tab
439
- with gr.Tab("Browse Models") as browse_tab:
440
- with gr.Row():
441
- # Filters column
442
- with gr.Column(scale=1):
443
- gr.Markdown("### Filters")
444
 
445
- family_dropdown = gr.Dropdown(
446
- choices=["All"],
447
- value="All",
448
- label="Model Family"
449
- )
450
 
451
- parameter_size_dropdown = gr.Dropdown(
452
- choices=["All"],
453
- value="All",
454
- label="Parameter Size"
455
- )
456
 
457
- name_search = gr.Textbox(
458
- label="Search by Name",
459
- placeholder="Search model names..."
460
- )
461
 
462
- search_btn = gr.Button("Search", variant="primary")
463
-
464
- stats_box = gr.Markdown("Loading stats...")
465
-
466
- # Results column
467
- with gr.Column(scale=2):
468
- gr.Markdown("### Results")
469
-
470
- results_table = gr.DataFrame(
471
- label="Models",
472
- interactive=False
473
- )
474
-
475
- model_json_display = gr.JSON(
476
- label="Model Details",
477
- visible=True
478
- )
479
-
480
- # Shodan Scan tab (admin only)
481
- with gr.Tab("Shodan Scan", visible=False) as scan_tab:
482
- with gr.Group():
483
- gr.Markdown("### Shodan Scan")
484
- gr.Markdown("Scan for publicly accessible Ollama instances using Shodan.")
485
-
486
- scan_btn = gr.Button("Start Scan", variant="primary")
487
- scan_progress = gr.Textbox(
488
- label="Scan Status",
489
- placeholder="Click 'Start Scan' to begin scanning...",
490
- interactive=False
491
- )
492
-
493
- # Login logic
494
- def handle_login(password):
495
- is_admin, status, message = login_submit(password)
496
- if is_admin:
497
- return is_admin, message, gr.update(visible=True)
498
- else:
499
- return is_admin, message, gr.update(visible=False)
500
-
501
- login_btn.click(
502
- handle_login,
503
- inputs=[admin_password],
504
- outputs=[admin_logged_in, login_status, scan_tab]
 
 
 
 
 
 
505
  )
506
-
507
- # Search logic
508
- def update_stats(dataset):
509
- total_instances = len(dataset)
510
- models_count = sum(len(instance.get('models', [])) for instance in dataset)
 
 
 
 
 
 
 
 
511
 
512
- families, parameter_sizes = get_unique_values(dataset)
513
- family_count = len(families) - 1 # Subtract "All"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
514
 
515
- return f"**Stats:** {total_instances} instances, {models_count} models, {family_count} families"
516
-
517
- search_btn.click(
518
- search_models,
519
- inputs=[family_dropdown, parameter_size_dropdown, name_search, admin_logged_in, dataset_state],
520
- outputs=[results_table, model_details_state]
521
- )
522
-
523
- # Model selection logic
524
- results_table.select(
525
- show_model_details,
526
- inputs=[model_details_state],
527
- outputs=[model_json_display]
528
- )
529
-
530
- # Scan logic
531
- async def run_scan():
532
- result, updated_dataset = await scan_ollama_instances()
533
- if updated_dataset is not None:
534
- return result, updated_dataset, *get_unique_values(updated_dataset), update_stats(updated_dataset)
535
- else:
536
- return result, None, [], [], ""
537
-
538
- scan_btn.click(
539
- run_scan,
540
- inputs=[],
541
- outputs=[scan_progress, dataset_state, family_dropdown, parameter_size_dropdown, stats_box]
542
- )
543
-
544
- # Initial data load
545
- def init_ui(dataset):
546
- families, parameter_sizes = get_unique_values(dataset)
547
- stats = update_stats(dataset)
548
 
549
- # Run initial search
550
- df, details = search_models("All", "All", "", False, dataset)
551
 
552
- return families, parameter_sizes, stats, df, details
553
-
554
- app.load(
555
- init_ui,
556
- inputs=[dataset_state],
557
- outputs=[family_dropdown, parameter_size_dropdown, stats_box, results_table, model_details_state]
558
- )
559
-
560
- return app
561
-
562
- # Start the application
563
- if __name__ == "__main__":
564
- app = create_app()
565
- app.launch()
566
- ):
567
- is_valid = verify_password(password, stored_password)
568
- else:
569
- # Compare directly for first-time setup
570
- is_valid = password == stored_password
571
 
572
- # Hash the password for future use
573
- if is_valid:
574
- hashed_password = hash_password(stored_password)
575
- logger.info("Hashed admin password for future use")
576
- # Note: We can't store the hashed password back to environment variables
577
- # in a Hugging Face Space environment. This would require a different approach.
578
 
579
- if is_valid:
580
- return True, "success", "Login successful! You now have admin access."
581
- else:
582
- return False, "error", "Invalid password. Please try again."
583
 
584
- except Exception as e:
585
- logger.exception("Login error:")
586
- return False, "error", f"Login error: {str(e)}"
587
-
588
- def search_models(
589
- family: str,
590
- parameter_size: str,
591
- name_search: str,
592
- is_admin: bool,
593
- dataset: Dataset
594
- ) -> Tuple[pd.DataFrame, List[Dict[str, Any]]]:
595
- """Search and filter models in the dataset."""
596
- df, details = filter_and_search_models(
597
- dataset,
598
- family=None if family == "All" else family,
599
- parameter_size=None if parameter_size == "All" else parameter_size,
600
- name_search=name_search,
601
- is_admin=is_admin
602
- )
603
- return df, details
604
-
605
- def show_model_details(evt: gr.SelectData, models: List[Dict[str, Any]]) -> Dict[str, Any]:
606
- """Show details for a selected model."""
607
- if not models or evt.index[0] >= len(models):
608
- return {}
609
-
610
- model = models[evt.index[0]]
611
- return model
612
-
613
- # Main Gradio application
614
- def create_app():
615
- with gr.Blocks(title="Ollama Scanner", theme=gr.themes.Soft()) as app:
616
  # State variables
617
- admin_logged_in = gr.State(False)
618
- dataset_state = gr.State(get_dataset())
619
- model_details_state = gr.State([])
620
-
621
- # Header
622
- gr.Markdown("# 🔍 Ollama Scanner")
623
- gr.Markdown("Browse publicly accessible Ollama instances and their models")
624
-
625
- # Login tab
626
- with gr.Tab("Admin Login") as login_tab:
627
- with gr.Group():
628
- gr.Markdown("### Admin Login")
629
- gr.Markdown("Enter the admin password to access administrative features.")
630
-
631
- admin_password = gr.Textbox(
632
- type="password",
633
- label="Admin Password",
634
- placeholder="Enter admin password"
635
- )
636
- login_btn = gr.Button("Login", variant="primary")
637
- login_status = gr.Markdown("")
638
 
639
- # Browse Models tab
640
- with gr.Tab("Browse Models") as browse_tab:
641
  with gr.Row():
642
- # Filters column
643
- with gr.Column(scale=1):
644
- gr.Markdown("### Filters")
 
 
 
 
 
 
 
 
645
 
 
646
  family_dropdown = gr.Dropdown(
647
- choices=["All"],
648
  value="All",
649
  label="Model Family"
650
  )
651
-
652
- parameter_size_dropdown = gr.Dropdown(
653
- choices=["All"],
654
  value="All",
655
  label="Parameter Size"
656
  )
657
-
658
  name_search = gr.Textbox(
659
- label="Search by Name",
660
- placeholder="Search model names..."
661
  )
662
-
663
- search_btn = gr.Button("Search", variant="primary")
664
-
665
- stats_box = gr.Markdown("Loading stats...")
666
-
667
- # Results column
668
- with gr.Column(scale=2):
669
- gr.Markdown("### Results")
670
-
671
- results_table = gr.DataFrame(
672
- label="Models",
673
- interactive=False
674
- )
675
-
676
- model_json_display = gr.JSON(
677
- label="Model Details",
678
- visible=True
679
- )
680
-
681
- # Shodan Scan tab (admin only)
682
- with gr.Tab("Shodan Scan", visible=False) as scan_tab:
683
- with gr.Group():
684
- gr.Markdown("### Shodan Scan")
685
- gr.Markdown("Scan for publicly accessible Ollama instances using Shodan.")
686
-
687
- scan_btn = gr.Button("Start Scan", variant="primary")
688
- scan_progress = gr.Textbox(
689
- label="Scan Status",
690
- placeholder="Click 'Start Scan' to begin scanning...",
691
- interactive=False
692
- )
693
-
694
- # Login logic
695
- def handle_login(password):
696
- is_admin, status, message = login_submit(password)
697
- if is_admin:
698
- return is_admin, message, gr.update(visible=True)
699
- else:
700
- return is_admin, message, gr.update(visible=False)
701
-
702
- login_btn.click(
703
- handle_login,
704
- inputs=[admin_password],
705
- outputs=[admin_logged_in, login_status, scan_tab]
706
- )
707
-
708
- # Search logic
709
- def update_stats(dataset):
710
- total_instances = len(dataset)
711
- models_count = sum(len(instance.get('models', [])) for instance in dataset)
712
 
713
- families, parameter_sizes = get_unique_values(dataset)
714
- family_count = len(families) - 1 # Subtract "All"
715
 
716
- return f"**Stats:** {total_instances} instances, {models_count} models, {family_count} families"
717
-
718
- search_btn.click(
719
- search_models,
720
- inputs=[family_dropdown, parameter_size_dropdown, name_search, admin_logged_in, dataset_state],
721
- outputs=[results_table, model_details_state]
722
- )
723
-
724
- # Model selection logic
725
- results_table.select(
726
- show_model_details,
727
- inputs=[model_details_state],
728
- outputs=[model_json_display]
729
- )
730
-
731
- # Scan logic
732
- async def run_scan():
733
- result, updated_dataset = await scan_ollama_instances()
734
- if updated_dataset is not None:
735
- return result, updated_dataset, *get_unique_values(updated_dataset), update_stats(updated_dataset)
736
- else:
737
- return result, None, [], [], ""
738
-
739
- scan_btn.click(
740
- run_scan,
741
- inputs=[],
742
- outputs=[scan_progress, dataset_state, family_dropdown, parameter_size_dropdown, stats_box]
743
- )
744
-
745
- # Initial data load
746
- def init_ui(dataset):
747
- families, parameter_sizes = get_unique_values(dataset)
748
- stats = update_stats(dataset)
749
 
750
- # Run initial search
751
- df, details = search_models("All", "All", "", False, dataset)
752
 
753
- return families, parameter_sizes, stats, df, details
754
-
755
- app.load(
756
- init_ui,
757
- inputs=[dataset_state],
758
- outputs=[family_dropdown, parameter_size_dropdown, stats_box, results_table, model_details_state]
759
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
760
 
761
  return app
762
 
763
- # Start the application
 
 
 
 
764
  if __name__ == "__main__":
765
- app = create_app()
766
- app.launch()
 
 
 
 
 
 
 
1
  import os
2
+ import logging
3
  import requests
4
+ import bcrypt
5
+ import gradio as gr
 
6
  from huggingface_hub import HfApi, login
7
+ from datasets import load_dataset, Dataset, Features, Value, Sequence
8
+ from typing import Dict, List, Optional, Any
9
+ import time
10
+ from concurrent.futures import ThreadPoolExecutor, as_completed
11
+ import shodan
12
+ import html
13
 
14
  # Configure logging
15
  logging.basicConfig(
16
  level=logging.INFO,
17
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
 
18
  )
19
+ logger = logging.getLogger(__name__)
20
 
21
+ # Function to access secrets
22
def get_secret(name: str) -> str:
    """Get a secret configured for this Hugging Face Space.

    Spaces expose their configured secrets to the running application as
    environment variables (huggingface_hub has no ``secrets`` module), so
    the value is read from the environment.

    Args:
        name: Name of the secret / environment variable.

    Returns:
        The secret value, or "" when it is not set.
    """
    # BUGFIX: the previous ``from huggingface_hub import secrets`` raised
    # ImportError on every call, so this function always returned "".
    value = os.getenv(name, "")
    if not value:
        logging.getLogger(__name__).warning(f"Secret {name} is not set")
    return value
30
 
31
# Sanitize inputs
def sanitize_input(text: str) -> str:
    """Return *text* with HTML-special characters escaped.

    Falsy input (empty string or None) yields "" so callers never have to
    null-check before displaying the result.
    """
    return html.escape(text) if text else ""
37
 
38
# Dataset functions
def get_or_create_dataset(repo_id: str, token: Optional[str] = None) -> Dataset:
    """
    Get or create a dataset with the scanner schema.

    Args:
        repo_id: The Hugging Face dataset repository ID
        token: Authentication token for private datasets

    Returns:
        The "train" split of the existing dataset, or a freshly created
        empty dataset that has been pushed to the Hub.
    """
    try:
        # Try to load the dataset
        dataset = load_dataset(repo_id, token=token)
        return dataset["train"]
    except Exception as e:
        logger.info(f"Dataset {repo_id} not found or error loading it: {e}")

        # BUGFIX: datasets.Value / Sequence accept no ``default_value``
        # keyword — passing one raises TypeError.  Defaults are instead
        # guaranteed by always supplying every field when rows are added.
        features = Features({
            "ip": Value("string"),
            "port": Value("int32"),
            "country": Value("string"),
            "region": Value("string"),
            "org": Value("string"),
            "models": Sequence({
                "name": Value("string"),
                "family": Value("string"),
                "parameter_size": Value("string"),
                "quantization_level": Value("string"),
                "digest": Value("string"),
                "modified_at": Value("string"),
                "size": Value("int64"),
            }),
        })

        # Create an empty dataset with the schema above.
        empty_dataset = Dataset.from_dict({
            "ip": [],
            "port": [],
            "country": [],
            "region": [],
            "org": [],
            "models": []
        }, features=features)

        # Push to hub so subsequent loads find the repository.
        empty_dataset.push_to_hub(repo_id, token=token)

        return empty_dataset
89
 
90
def update_dataset_entry(dataset: Dataset, entry: Dict[str, Any]) -> Dataset:
    """
    Add or update an entry in the dataset.

    Entries are keyed by (ip, port): an existing row with the same pair is
    replaced in place, otherwise the entry is appended.

    Args:
        dataset: The dataset to update
        entry: The entry to add or update

    Returns:
        The updated dataset
    """
    ip = entry["ip"]
    port = entry["port"]

    # Find the index of an existing row for this endpoint, if any.
    existing_idx = None
    for idx, item in enumerate(dataset):
        if item["ip"] == ip and item["port"] == port:
            existing_idx = idx
            break

    if existing_idx is None:
        # Add new entry
        return dataset.add_item(entry)

    # Replace the existing row and rebuild the dataset.  (The previous
    # implementation first ran ``dataset.select(range(len(dataset)))``,
    # which is a no-op — removed.)
    rows = list(dataset)
    rows[existing_idx] = entry
    return Dataset.from_dict(
        {name: [row[name] for row in rows] for name in dataset.column_names}
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
def push_dataset_to_hub(dataset: Dataset, repo_id: str, token: Optional[str] = None):
    """
    Push dataset to the Hugging Face Hub.

    Failures are logged rather than raised so a push error never aborts a
    scan in progress.

    Args:
        dataset: The dataset to push
        repo_id: The repository ID
        token: Authentication token
    """
    try:
        dataset.push_to_hub(repo_id, token=token)
    except Exception as e:
        logger.error(f"Error pushing dataset to hub: {e}")
    else:
        logger.info(f"Successfully pushed dataset to {repo_id}")
137
+
138
# Shodan functions
def scan_with_shodan(shodan_api_key: str, query: str, max_results: int = 1000) -> List[Dict[str, Any]]:
    """
    Scan with Shodan API for Ollama instances.

    Args:
        shodan_api_key: Shodan API key
        query: Shodan search query
        max_results: Maximum number of results to return

    Returns:
        List of discovered instances (at most ``max_results`` entries)
    """
    if not shodan_api_key:
        logger.error("No Shodan API key provided")
        return []

    try:
        api = shodan.Shodan(shodan_api_key)
        results: List[Dict[str, Any]] = []

        # Get the number of total results
        count_result = api.count(query)
        total_results = count_result['total']
        logger.info(f"Found {total_results} results for query: {query}")

        # Shodan returns 100 results per page; round the page count up.
        limit = min(total_results, max_results)
        pages = -(-limit // 100)

        for page in range(1, pages + 1):
            try:
                result_page = api.search(query, page=page)
                for match in result_page['matches']:
                    # BUGFIX: the last page may overshoot — previously the
                    # whole page was appended and the cap could be exceeded.
                    if len(results) >= max_results:
                        break
                    instance = {
                        "ip": match.get("ip_str", ""),
                        "port": match.get("port", 11434),  # Default Ollama port
                        "country": match.get("location", {}).get("country_name", ""),
                        "region": match.get("location", {}).get("region_name", ""),
                        "org": match.get("org", ""),
                        "models": []  # Will be populated later
                    }
                    results.append(instance)
                logger.info(f"Processed page {page}/{pages}")
                if len(results) >= max_results:
                    break
            except shodan.APIError as e:
                logger.error(f"Shodan API error on page {page}: {e}")
                break

        return results
    except shodan.APIError as e:
        logger.error(f"Shodan API error: {e}")
        return []
191
 
192
# Ollama endpoint checking
def check_ollama_endpoint(instance: Dict[str, Any], timeout: int = 5) -> Dict[str, Any]:
    """
    Query an Ollama endpoint's /api/tags route and attach its model list.

    Args:
        instance: Instance information (ip, port, etc.)
        timeout: Request timeout in seconds

    Returns:
        A copy of *instance* whose "models" key holds the discovered model
        dicts; the list is empty when the endpoint is unreachable or the
        response is not the expected JSON shape.
    """
    ip, port = instance["ip"], instance["port"]
    url = f"http://{ip}:{port}/api/tags"

    enriched = instance.copy()
    enriched["models"] = []  # default until a successful response says otherwise

    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
        payload = resp.json()

        if "models" not in payload:
            logger.warning(f"No models found in response from {ip}:{port}")
            return enriched

        # Flatten each reported model into the fields the dataset stores.
        enriched["models"] = [
            {
                "name": entry.get("name", ""),
                "family": entry.get("details", {}).get("family", ""),
                "parameter_size": entry.get("details", {}).get("parameter_size", ""),
                "quantization_level": entry.get("details", {}).get("quantization_level", ""),
                "digest": entry.get("digest", ""),
                "modified_at": entry.get("modified_at", ""),
                "size": entry.get("size", 0),
            }
            for entry in payload["models"]
        ]
        logger.info(f"Successfully extracted {len(enriched['models'])} models from {ip}:{port}")
    except requests.exceptions.RequestException as e:
        logger.error(f"Network error for {ip}:{port}: {e}")
    except ValueError as e:
        logger.error(f"Invalid JSON from {ip}:{port}: {e}")
    except Exception as e:
        logger.exception(f"Unexpected error for {ip}:{port}: {e}")

    return enriched
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
 
249
# Authentication functions
def verify_password(password: str, stored_password: str) -> bool:
    """
    Verify whether the entered password matches the stored password.

    Args:
        password: The entered password.
        stored_password: The stored password — either a bcrypt hash
            (``$2a$``/``$2b$``/``$2y$`` prefixed) or a plaintext value
            used for development/testing.

    Returns:
        True if the passwords match, False otherwise.
    """
    # NOTE(review): this redefines verify_password with a different contract
    # than the earlier definition near the top of the file (which always
    # expects a bcrypt hash) — confirm which one callers should use and
    # remove the duplicate.
    # Sanitize input
    password = sanitize_input(password)

    # BUGFIX: bcrypt hashes may start with $2a$, $2b$ or $2y$ depending on
    # the producing library/version; the old check accepted only $2b$ and
    # silently fell back to plaintext comparison for the others, so valid
    # hashed credentials could never match.
    if stored_password.startswith(('$2a$', '$2b$', '$2y$')):
        try:
            return bcrypt.checkpw(password.encode('utf-8'), stored_password.encode('utf-8'))
        except ValueError:
            # Malformed hash: treat as non-match rather than crashing login.
            return False

    # Direct comparison for development/testing
    return password == stored_password
270
+
271
# UI creation
def create_ui():
    """
    Create the Gradio UI for the application.

    Builds three tabs: a public model browser, an admin login, and an
    admin-only Shodan scan tab. Secrets (admin password, Shodan key/query,
    HF token) are read via get_secret; the instance dataset is loaded via
    get_or_create_dataset.

    Returns:
        Gradio interface (gr.Blocks)
    """
    # Get secrets
    admin_password = get_secret("ADMIN_PASSWORD")
    if not admin_password:
        admin_password = "admin"  # Default for development (should be replaced in production)
        logger.warning("Admin password not set, using default (insecure)")

    shodan_api_key = get_secret("SHODAN_API_KEY")
    if not shodan_api_key:
        logger.warning("Shodan API key not set, scans will not work")

    shodan_query = get_secret("SHODAN_QUERY")
    if not shodan_query:
        shodan_query = "product:Ollama port:11434"
        logger.info(f"Using default Shodan query: {shodan_query}")

    hf_token = get_secret("HF_TOKEN")

    # Load dataset
    # NOTE(review): get_or_create_dataset is defined elsewhere in the file;
    # assumed to return an iterable of instance dicts — confirm.
    dataset_repo_id = "latterworks/llama_checker_results"
    dataset = get_or_create_dataset(dataset_repo_id, token=hf_token)

    # Function to search and display models
    def search_models(family, param_size, name, current_dataset):
        """Filter all models in the dataset by family / parameter size /
        name substring and return rows for the results dataframe."""
        # Sanitize inputs
        name = sanitize_input(name)

        results = []

        for item in current_dataset:
            if item["models"]:
                for model in item["models"]:
                    # Apply filters ("All" means no filtering for dropdowns)
                    if family != "All" and model["family"] != family:
                        continue

                    if param_size != "All" and model["parameter_size"] != param_size:
                        continue

                    # Case-insensitive substring match on the model name
                    if name and name.lower() not in model["name"].lower():
                        continue

                    # Calculate size in GB (stored size is in bytes)
                    size_gb = round(model["size"] / (1024 * 1024 * 1024), 2) if model["size"] else 0

                    # Add to results
                    results.append([
                        model["name"],
                        model["family"],
                        model["parameter_size"],
                        model["quantization_level"],
                        size_gb
                    ])

        return results

    # Function to display model details
    def show_model_details(evt: gr.SelectData, results, is_admin, current_dataset):
        """Return a detail dict for the model selected in the results
        dataframe; admins also see the hosting instance's IP/location.

        NOTE(review): lookup is by model name only — if two instances host a
        model with the same name, the first match wins. Confirm acceptable.
        """
        selected_row = evt.index[0]
        model_name = results[selected_row][0]

        # Find the model
        for item in current_dataset:
            if item["models"]:
                for model in item["models"]:
                    if model["name"] == model_name:
                        details = {
                            "name": model["name"],
                            "family": model["family"],
                            "parameter_size": model["parameter_size"],
                            "quantization_level": model["quantization_level"],
                            "digest": model["digest"],
                            "modified_at": model["modified_at"],
                            "size_bytes": model["size"],
                            "size_gb": round(model["size"] / (1024 * 1024 * 1024), 2) if model["size"] else 0
                        }

                        # Include IP and port for admin users
                        if is_admin:
                            details["ip"] = item["ip"]
                            details["port"] = item["port"]
                            details["country"] = item["country"]
                            details["region"] = item["region"]
                            details["org"] = item["org"]

                        return details

        return {"error": "Model not found"}

    # Function to update admin visibility
    def update_admin_visibility(is_admin):
        """Toggle the warning banner vs. the scan controls based on login."""
        return (
            gr.update(visible=not is_admin),  # admin_required
            gr.update(visible=is_admin)  # scan_group
        )

    # Function to perform scan
    def perform_scan(max_results, is_admin, current_dataset):
        """Run the Shodan scan, probe each instance's Ollama endpoint in
        parallel, persist to the HF dataset, and stream status updates.

        NOTE(review): this is a generator (it uses yield), but the non-admin
        branch uses `return value` — in a generator that value goes into
        StopIteration and Gradio will not render it, so non-admin users see
        no status update. Confirm and change to `yield` + bare `return`.
        """
        if not is_admin:
            return "⚠️ Admin login required", [], current_dataset

        # Start scan
        yield "🔍 Starting Shodan scan...", [], current_dataset

        try:
            # Get instances from Shodan
            instances = scan_with_shodan(shodan_api_key, shodan_query, max_results)
            yield f"🔍 Found {len(instances)} instances. Checking endpoints...", [], current_dataset

            # Check endpoints using executor (network-bound, so threads help)
            updated_instances = []
            with ThreadPoolExecutor(max_workers=10) as executor:
                # Create future tasks
                future_to_instance = {
                    executor.submit(check_ollama_endpoint, instance): instance
                    for instance in instances
                }

                # Process completed tasks
                for future in as_completed(future_to_instance):
                    try:
                        updated_instance = future.result()
                        updated_instances.append(updated_instance)
                    except Exception as e:
                        instance = future_to_instance[future]
                        logger.exception(f"Error processing {instance['ip']}:{instance['port']}: {e}")
                        # In case of error, append the original instance without model info
                        instance["models"] = []
                        updated_instances.append(instance)

            # Update dataset
            # NOTE(review): update_dataset_entry is defined elsewhere;
            # assumed to return a new dataset object per call — confirm.
            updated_dataset = current_dataset
            for instance in updated_instances:
                updated_dataset = update_dataset_entry(updated_dataset, instance)

            # Push to hub
            push_dataset_to_hub(updated_dataset, dataset_repo_id, token=hf_token)

            # Prepare results for display
            results = []
            total_models = 0
            for instance in updated_instances:
                models_count = len(instance["models"]) if instance["models"] else 0
                total_models += models_count
                results.append([
                    instance["ip"],
                    instance["port"],
                    instance["country"],
                    instance["region"],
                    instance["org"],
                    models_count
                ])

            yield f"✅ Scan completed! Found {len(instances)} instances with a total of {total_models} models.", results, updated_dataset

        except Exception as e:
            logger.exception(f"Error during scan: {e}")
            yield f"❌ Error during scan: {str(e)}", [], current_dataset

    # Create Gradio UI
    with gr.Blocks(title="Ollama Instance Scanner") as app:
        # State variables (per-session)
        current_dataset = gr.State(dataset)
        is_admin = gr.State(False)

        with gr.Tab("Browse Models"):
            # Filters
            with gr.Row():
                with gr.Column():
                    # Extract unique values for family and parameter_size
                    # to populate the filter dropdowns.
                    families = set()
                    parameter_sizes = set()
                    for item in dataset:
                        if item["models"]:
                            for model in item["models"]:
                                if model["family"]:
                                    families.add(model["family"])
                                if model["parameter_size"]:
                                    parameter_sizes.add(model["parameter_size"])

                    # Create dropdowns
                    family_dropdown = gr.Dropdown(
                        choices=["All"] + sorted(list(families)),
                        value="All",
                        label="Model Family"
                    )
                    param_size_dropdown = gr.Dropdown(
                        choices=["All"] + sorted(list(parameter_sizes)),
                        value="All",
                        label="Parameter Size"
                    )
                    name_search = gr.Textbox(
                        value="",
                        label="Model Name Contains"
                    )

            # Search button
            search_btn = gr.Button("Search")

            # Results dataframe
            results_df = gr.DataFrame(
                value=[],
                headers=["Name", "Family", "Parameter Size", "Quantization", "Size (GB)"],
                label="Model Results"
            )

            # Detailed info JSON viewer
            model_details = gr.JSON(label="Model Details")

            # Connect events
            search_btn.click(
                search_models,
                inputs=[family_dropdown, param_size_dropdown, name_search, current_dataset],
                outputs=[results_df]
            )

            results_df.select(
                show_model_details,
                inputs=[results_df, is_admin, current_dataset],
                outputs=[model_details]
            )

        with gr.Tab("Admin Login"):
            admin_pw_input = gr.Textbox(
                value="",
                type="password",
                label="Admin Password"
            )
            login_btn = gr.Button("Login")
            login_status = gr.Markdown("Not logged in")

            def admin_login(password):
                """Check the entered password and flip the is_admin state."""
                if verify_password(password, admin_password):
                    return "✅ Successfully logged in as admin", True
                else:
                    return "❌ Invalid password", False

            login_btn.click(
                admin_login,
                inputs=[admin_pw_input],
                outputs=[login_status, is_admin]
            )

        with gr.Tab("Shodan Scan") as shodan_tab:
            # This tab is initially hidden and only shown to admins
            admin_required = gr.Markdown("⚠️ Admin login required to access this feature")

            with gr.Group(visible=False) as scan_group:
                max_results = gr.Slider(
                    minimum=10,
                    maximum=1000,
                    value=100,
                    step=10,
                    label="Max Results"
                )
                scan_btn = gr.Button("Start Scan")
                scan_status = gr.Markdown("Ready to scan")

                # Admin results dataframe with IP and port
                admin_results_df = gr.DataFrame(
                    value=[],
                    headers=["IP", "Port", "Country", "Region", "Organization", "Models Count"],
                    label="Scan Results"
                )

            # Connect events: toggle visibility when login state changes
            is_admin.change(
                update_admin_visibility,
                inputs=[is_admin],
                outputs=[admin_required, scan_group]
            )

            scan_btn.click(
                perform_scan,
                inputs=[max_results, is_admin, current_dataset],
                outputs=[scan_status, admin_results_df, current_dataset]
            )

    return app
557
 
558
# Main function
def main():
    """Build the Gradio interface and start serving it."""
    create_ui().launch()


if __name__ == "__main__":
    main()