xingqiang committed on
Commit 4d5fc2a · 1 Parent(s): e7d85ce

Initial commit

Files changed (4)
  1. README.md +63 -6
  2. app.py +137 -74
  3. model.py +73 -13
  4. requirements.txt +13 -11
README.md CHANGED
@@ -1,14 +1,71 @@
  ---
- title: Detection Repoter
- emoji: 💻
- colorFrom: green
- colorTo: pink
+ title: Radar Image Analysis System
+ emoji: 📡
+ colorFrom: blue
+ colorTo: red
  sdk: gradio
- sdk_version: 5.0.1
+ sdk_version: 3.50.0
  app_file: app.py
  pinned: false
  license: mit
- short_description: 'Reporter generation of radar deatection '
  ---
+
+ # Radar Image Analysis System
+
+ This Hugging Face Space provides an interactive interface for analyzing radar images using deep learning and signal processing techniques. The system can detect defects, analyze signal characteristics, and generate detailed reports.
+
+ ## Features
+
+ - **Defect Detection**: Uses the PaliGemma model to detect and localize defects in radar images
+ - **Signal Analysis**: Analyzes key signal characteristics:
+   - Amplitude analysis
+   - Distribution range measurement
+   - Attenuation rate calculation
+   - Reflection count analysis
+ - **Report Generation**: Creates detailed reports with visualizations
+ - **History Tracking**: Maintains a history of analyzed images and findings
+
+ ## How to Use
+
+ 1. Upload a radar image using the interface
+ 2. Click "Analyze" to process the image
+ 3. View the detection results and analysis report
+ 4. Access previous analyses through the history feature
+
+ ## Technical Details
+
+ - **Model**: PaliGemma-3b fine-tuned for radar defect detection
+ - **Framework**: PyTorch + Transformers
+ - **Interface**: Gradio
+ - **Signal Processing**: Custom algorithms for radar signal analysis
+
+ ## Environment Variables
+
+ The following environment variables need to be set in your Space:
+
+ - `HF_TOKEN`: Your Hugging Face token for accessing the model
+ - `DATABASE_URL` (optional): URL for the database connection
+
+ ## Requirements
+
+ See `requirements.txt` for the complete list of dependencies.
+
+ ## License
+
+ MIT License
+
+ ## Citation
+
+ If you use this system in your work, please cite:
+
+ ```bibtex
+ @software{radar_analysis_system,
+   title = {Radar Image Analysis System},
+   author = {Chen, Xingqiang},
+   year = {2024},
+   publisher = {Hugging Face Spaces},
+   url = {https://huggingface.co/spaces/[your-username]/radar-analysis}
+ }
+ ```
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
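The environment variables the README documents are read at runtime with `os.getenv` (as app.py below shows for `HF_TOKEN`). A minimal sketch of that startup wiring, assuming `python-dotenv` (added to requirements.txt in this commit) is used to load a local `.env` file during development; the warning fallback is illustrative, not code from the repo:

```python
# Minimal sketch: load the environment variables the README documents.
# Assumes python-dotenv; on Spaces the variables come from repository
# secrets, so load_dotenv() is a harmless no-op there.
import os
from dotenv import load_dotenv

load_dotenv()  # reads a local .env file if one exists

hf_token = os.getenv("HF_TOKEN")          # required for the gated model
database_url = os.getenv("DATABASE_URL")  # optional database connection

if hf_token is None:
    print("Warning: HF_TOKEN is not set; model loading may fail.")
```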
app.py CHANGED
@@ -1,91 +1,154 @@
  import gradio as gr
  import torch
  from PIL import Image
- import io
- import pandas as pd
+ import numpy as np
+ import os
+ from pathlib import Path

  from model import RadarDetectionModel
  from feature_extraction import (calculate_amplitude, classify_amplitude,
                                  calculate_distribution_range, classify_distribution_range,
                                  calculate_attenuation_rate, classify_attenuation_rate,
                                  count_reflections, classify_reflections)
- from report_generation import generate_report
+ from report_generation import generate_report, render_report
  from utils import plot_detection
  from database import save_report, get_report_history
- from report_generation import render_report
- model = RadarDetectionModel()
+
+ # Initialize model with HF token from environment
+ model = None
+ try:
+     model = RadarDetectionModel(use_auth_token=os.getenv("HF_TOKEN"))
+ except Exception as e:
+     print(f"Warning: Model initialization failed: {str(e)}")
+     print("The app will initialize the model on first request.")
+
+ def initialize_model():
+     global model
+     if model is None:
+         try:
+             model = RadarDetectionModel(use_auth_token=os.getenv("HF_TOKEN"))
+         except Exception as e:
+             return None, f"Error initializing model: {str(e)}"
+     return model, None

  def process_image(image):
-     detection_result = model.detect(image)
-
-     np_image = np.array(image)
-     amplitude = calculate_amplitude(np_image)
-     amplitude_class = classify_amplitude(amplitude)
-
-     box = detection_result['boxes'][0].tolist()
-     distribution_range = calculate_distribution_range(box)
-     distribution_class = classify_distribution_range(distribution_range)
-
-     attenuation_rate = calculate_attenuation_rate(np_image)
-     attenuation_class = classify_attenuation_rate(attenuation_rate)
-
-     reflection_count = count_reflections(np_image)
-     reflection_class = classify_reflections(reflection_count)
-
-     features = {
-         "振幅": amplitude_class,
-         "分布范围": distribution_class,
-         "衰减速度": attenuation_class,
-         "反射次数": reflection_class
-     }
-
-     report = generate_report(detection_result, image, features)
-
-     detection_image = plot_detection(image, detection_result)
-
-     save_report(report)
-
-     return detection_image, report
-
-
- def analyze_radar_image(image):
-     detection_image, report = process_image(image)
-     report_html = render_report(report)
-     return detection_image, report_html
+     if image is None:
+         return None, "Please upload an image."
+
+     # Initialize model if needed
+     global model
+     model, error = initialize_model()
+     if error:
+         return None, error
+
+     try:
+         # Convert to PIL Image if needed
+         if isinstance(image, np.ndarray):
+             image = Image.fromarray(image)
+
+         # Run detection
+         detection_result = model.detect(image)
+
+         # Extract features
+         np_image = np.array(image)
+         amplitude = calculate_amplitude(np_image)
+         amplitude_class = classify_amplitude(amplitude)
+
+         if len(detection_result['boxes']) > 0:
+             box = detection_result['boxes'][0].tolist()
+             distribution_range = calculate_distribution_range(box)
+             distribution_class = classify_distribution_range(distribution_range)
+         else:
+             distribution_class = "No defects detected"
+
+         attenuation_rate = calculate_attenuation_rate(np_image)
+         attenuation_class = classify_attenuation_rate(attenuation_rate)
+
+         reflection_count = count_reflections(np_image)
+         reflection_class = classify_reflections(reflection_count)
+
+         features = {
+             "Amplitude": amplitude_class,
+             "Distribution Range": distribution_class,
+             "Attenuation Rate": attenuation_class,
+             "Reflection Count": reflection_class
+         }
+
+         # Generate report and visualizations
+         report = generate_report(detection_result, image, features)
+         detection_image = plot_detection(image, detection_result)
+
+         # Save report if database is configured
+         try:
+             save_report(report)
+         except Exception as e:
+             print(f"Warning: Could not save report: {str(e)}")
+
+         return detection_image, render_report(report)
+
+     except Exception as e:
+         error_msg = f"Error processing image: {str(e)}"
+         print(error_msg)
+         return None, error_msg

  def display_history():
-     reports = get_report_history()
-     history_html = "<div class='history-container'><h3>历史记录</h3>"
-     for report in reports:
-         history_html += f"""
-         <div class='history-item'>
-             <p><strong>报告ID:</strong> {report.report_id}</p>
-             <p><strong>缺陷类型:</strong> {report.defect_type}</p>
-             <p><strong>描述:</strong> {report.description}</p>
-             <p><strong>创建时间:</strong> {report.created_at}</p>
-         </div>
-         """
-     history_html += "</div>"
-     return history_html
-
-
- with gr.Blocks(css="static/style.css") as iface:
-     gr.Markdown("# 雷达图谱分析系统")
+     try:
+         reports = get_report_history()
+         history_html = "<div class='history-container'><h3>Analysis History</h3>"
+         for report in reports:
+             history_html += f"""
+             <div class='history-item'>
+                 <p><strong>Report ID:</strong> {report.report_id}</p>
+                 <p><strong>Defect Type:</strong> {report.defect_type}</p>
+                 <p><strong>Description:</strong> {report.description}</p>
+                 <p><strong>Created:</strong> {report.created_at}</p>
+             </div>
+             """
+         history_html += "</div>"
+         return history_html
+     except Exception as e:
+         return f"Error retrieving history: {str(e)}"
+
+ # Create Gradio interface
+ css = """
+ .gradio-container {max-width: 1200px !important}
+ .history-container {margin-top: 20px; padding: 10px;}
+ .history-item {
+     border: 1px solid #ddd;
+     padding: 10px;
+     margin: 10px 0;
+     border-radius: 5px;
+ }
+ """
+
+ with gr.Blocks(css=css) as iface:
+     gr.Markdown("# Radar Image Analysis System")
+
      with gr.Row():
          with gr.Column(scale=1):
-             input_image = gr.Image(type="pil", label="上传雷达图谱")
-             analyze_button = gr.Button("分析")
+             input_image = gr.Image(type="pil", label="Upload Radar Image")
+             analyze_button = gr.Button("Analyze", variant="primary")
+
          with gr.Column(scale=2):
-             output_image = gr.Image(type="pil", label="检测结果")
-             output_report = gr.HTML(label="分析报告")
-
-     history_button = gr.Button("查看历史记录")
-     history_output = gr.HTML()
-
-     analyze_button.click(analyze_radar_image, inputs=[
-         input_image], outputs=[output_image, output_report])
-     history_button.click(display_history, inputs=[], outputs=[history_output])
-
+             output_image = gr.Image(type="pil", label="Detection Result")
+             output_report = gr.HTML(label="Analysis Report")
+
+     with gr.Row():
+         history_button = gr.Button("View History")
+         history_output = gr.HTML()
+
+     # Set up event handlers
+     analyze_button.click(
+         fn=process_image,
+         inputs=[input_image],
+         outputs=[output_image, output_report]
+     )
+
+     history_button.click(
+         fn=display_history,
+         inputs=[],
+         outputs=[history_output]
+     )
+
+ # Launch the interface
  iface.launch()
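The new app.py constructs the model once at import time and falls back to lazy initialization on the first request if that fails. A self-contained sketch of that pattern in isolation, with `DummyModel` as a stand-in for the real `RadarDetectionModel`:

```python
# Self-contained sketch of the lazy-initialization pattern used in app.py.
# DummyModel is a stand-in; the real app constructs RadarDetectionModel.
class DummyModel:
    def detect(self, image):
        return {"boxes": [], "scores": [], "labels": []}

_model = None

def get_model():
    """Build the model on first use, then reuse the same instance."""
    global _model
    if _model is None:
        _model = DummyModel()  # the expensive construction happens once
    return _model

assert get_model() is get_model()  # later calls reuse the first instance
```

This keeps the Space responsive at startup and surfaces load failures as an error message in the UI rather than a crashed container.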
model.py CHANGED
@@ -1,23 +1,83 @@
+ import os
  from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
  import torch
- from config import MODEL_NAME
+ from huggingface_hub import login
+ import logging
+
+ logger = logging.getLogger(__name__)

  class RadarDetectionModel:
-     def __init__(self):
-         self.feature_extractor = AutoFeatureExtractor.from_pretrained(
-             "google/paligemma-3b-ft-coco35l-224")
-         self.model = AutoModelForObjectDetection.from_pretrained(
-             "google/paligemma-3b-ft-coco35l-224")
-         self.model.eval()
+     def __init__(self, model_name="google/paligemma-3b-ft-coco35l-224", use_auth_token=None):
+         """
+         Initialize the radar detection model.
+
+         Args:
+             model_name (str): Name of the model to load from HuggingFace
+             use_auth_token (str, optional): HuggingFace token for accessing gated models.
+                 If None, the HF_TOKEN environment variable is used.
+         """
+         self.model_name = model_name
+
+         # Get token from environment if not provided
+         if use_auth_token is None:
+             use_auth_token = os.getenv("HF_TOKEN")
+
+         try:
+             # Try to load the model with authentication
+             if use_auth_token:
+                 logger.info("Attempting to load model with authentication token...")
+                 login(use_auth_token)
+
+             self.feature_extractor = AutoFeatureExtractor.from_pretrained(
+                 self.model_name,
+                 use_auth_token=use_auth_token
+             )
+             self.model = AutoModelForObjectDetection.from_pretrained(
+                 self.model_name,
+                 use_auth_token=use_auth_token
+             )
+             self.model.eval()
+
+         except Exception as e:
+             logger.error(f"Error loading model: {str(e)}")
+             logger.error("""
+                 Failed to load the model. This could be due to:
+                 1. A missing authentication token for a gated model
+                 2. An invalid token
+                 3. No internet connection
+
+                 Please ensure that you have:
+                 1. Set the HF_TOKEN environment variable with your HuggingFace token,
+                    OR passed the token directly to the constructor
+                 2. A valid token with access to the model
+                 3. A working internet connection
+
+                 You can get your token from: https://huggingface.co/settings/tokens
+             """)
+             raise

      @torch.no_grad()
      def detect(self, image):
-         inputs = self.feature_extractor(images=image, return_tensors="pt")
-         outputs = self.model(**inputs)
-
-         target_sizes = torch.tensor([image.size[::-1]])
-         results = self.feature_extractor.post_process_object_detection(
-             outputs, threshold=0.5, target_sizes=target_sizes)[0]
-
-         return results
+         """
+         Perform object detection on the input image.
+
+         Args:
+             image: PIL Image object
+
+         Returns:
+             dict: Detection results including boxes, scores, and labels
+         """
+         try:
+             inputs = self.feature_extractor(images=image, return_tensors="pt")
+             outputs = self.model(**inputs)
+
+             # Process the outputs
+             target_sizes = torch.tensor([image.size[::-1]])
+             results = self.feature_extractor.post_process_object_detection(
+                 outputs, threshold=0.5, target_sizes=target_sizes)[0]
+
+             return results
+
+         except Exception as e:
+             logger.error(f"Error during detection: {str(e)}")
+             raise
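A usage sketch for the revised `RadarDetectionModel` API, assuming `HF_TOKEN` is set and a local sample image exists (`radar_scan.png` is an illustrative file name, not part of the repo):

```python
# Illustrative usage of RadarDetectionModel as defined above.
# Assumes HF_TOKEN is set and "radar_scan.png" exists locally.
import os
from PIL import Image
from model import RadarDetectionModel

model = RadarDetectionModel(use_auth_token=os.getenv("HF_TOKEN"))
image = Image.open("radar_scan.png").convert("RGB")

results = model.detect(image)  # dict with "boxes", "scores", "labels"
for box, score in zip(results["boxes"], results["scores"]):
    print(f"box={box.tolist()} score={score:.2f}")
```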
requirements.txt CHANGED
@@ -1,11 +1,13 @@
- gradio==3.37.0
- torch==2.0.1
- transformers==4.31.0
- Pillow==9.5.0
- numpy==1.23.5
- matplotlib==3.7.1
- pandas==1.5.3
- sqlalchemy==2.0.19
- plotly==5.15.0
- scikit-learn==1.3.0
- jinja2==3.1.2
+ gradio>=3.50.0
+ torch>=2.0.1
+ transformers>=4.31.0
+ Pillow>=9.5.0
+ numpy>=1.23.5
+ matplotlib>=3.7.1
+ pandas>=1.5.3
+ sqlalchemy>=2.0.19
+ plotly>=5.15.0
+ scikit-learn>=1.3.0
+ jinja2>=3.1.2
+ huggingface-hub>=0.19.0
+ python-dotenv>=1.0.0
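The pins are loosened from exact (`==`) to minimum (`>=`) versions, letting the Space pick up compatible newer releases at build time; `huggingface-hub` and `python-dotenv` are new dependencies supporting the token-based model loading above.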