filtering code
utils/keck_filtering.ipynb  +251 -0
ADDED
@@ -0,0 +1,251 @@
In [71]:

    import os
    import glob
    import shutil

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from tqdm import tqdm
    from astropy.io import fits
    from astropy.wcs import WCS
    from spherical_geometry.polygon import SphericalPolygon
    from sklearn.cluster import AgglomerativeClustering

    def get_all_fits_files(root_dir):
        # Use glob to recursively find all *LR*.fits files under root_dir
        pattern = os.path.join(root_dir, '**', '*LR*.fits')
        fits_files = glob.glob(pattern, recursive=True)
        return fits_files
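For reference, a minimal, self-contained sketch (hypothetical file names, not from the dataset) of what the recursive glob in get_all_fits_files matches: only files whose names contain "LR", at any depth below the root directory.

    import glob
    import os
    import pathlib
    import tempfile

    # Hypothetical layout built in a temporary directory, purely to illustrate
    # how '**' with recursive=True descends into subdirectories.
    with tempfile.TemporaryDirectory() as root:
        for rel in ["a/keck_LR_0001.fits", "a/b/LR_0042.fits", "a/HIRES_0007.fits"]:
            p = pathlib.Path(root, rel)
            p.parent.mkdir(parents=True, exist_ok=True)
            p.touch()
        pattern = os.path.join(root, '**', '*LR*.fits')
        # Only the two files with "LR" in their names are listed.
        print(sorted(glob.glob(pattern, recursive=True)))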
In [56]:

    valid_fits_paths = get_all_fits_files('./GBI-16-2D/prelim_data')
    len(valid_fits_paths)

Out[56]:

    1014
In [57]:

    df_test = pd.read_json('./GBI-16-2D/splits/full_test.jsonl', lines=True)
    df_train = pd.read_json('./GBI-16-2D/splits/full_train.jsonl', lines=True)

    df = pd.concat([df_train, df_test])

    print(len(df))
    df = df[df['exposure_time'] >= 30]
    print(len(df))

Output:

    1014
    861
In [58]:

    latitudes = list(df['dec'])
    longitudes = list(df['ra'])

    n_points = len(latitudes)

    # Repeat each point n_points times for lat1, lon1
    lat1 = np.repeat(latitudes, n_points)
    lon1 = np.repeat(longitudes, n_points)

    # Tile the whole array n_points times for lat2, lon2
    lat2 = np.tile(latitudes, n_points)
    lon2 = np.tile(longitudes, n_points)

    # Calculates the angular separation between two spherical coordinates.
    # This can be lat/lon or RA/Dec.
    # Taken from astropy.
    def angular_separation_deg(lon1, lat1, lon2, lat2):
        lon1 = np.deg2rad(lon1)
        lon2 = np.deg2rad(lon2)
        lat1 = np.deg2rad(lat1)
        lat2 = np.deg2rad(lat2)

        sdlon = np.sin(lon2 - lon1)
        cdlon = np.cos(lon2 - lon1)
        slat1 = np.sin(lat1)
        slat2 = np.sin(lat2)
        clat1 = np.cos(lat1)
        clat2 = np.cos(lat2)

        num1 = clat2 * sdlon
        num2 = clat1 * slat2 - slat1 * clat2 * cdlon
        denominator = slat1 * slat2 + clat1 * clat2 * cdlon

        return np.rad2deg(np.arctan2(np.hypot(num1, num2), denominator))

    # Compute the pairwise angular separations
    angular_separations = angular_separation_deg(lon1, lat1, lon2, lat2)

    # Reshape the result into a matrix form
    angular_separations_matrix = angular_separations.reshape(n_points, n_points)

    def check_symmetric(a, rtol=1e-05, atol=1e-07):
        return np.allclose(a, a.T, rtol=rtol, atol=atol)

    print("Symmetric?")
    print(check_symmetric(angular_separations_matrix))
    print(angular_separations_matrix.shape)

Output:

    Symmetric?
    True
    (861, 861)
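The helper above mirrors astropy's Vincenty-based angular-separation formula. As an optional sanity check (a sketch with made-up coordinates, and assuming the cell above has been run so angular_separation_deg is defined), the same numbers can be reproduced with astropy's SkyCoord API:

    import numpy as np
    import astropy.units as u
    from astropy.coordinates import SkyCoord

    # Made-up test coordinates in degrees; not dataset values.
    ra = np.array([10.0, 10.1, 250.3])
    dec = np.array([-5.0, -5.05, 36.2])

    coords = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)

    # Separations from the first point to all points, computed both ways.
    sep_astropy = coords[0].separation(coords).deg
    sep_manual = angular_separation_deg(ra[0], dec[0], ra, dec)

    print(np.allclose(sep_astropy, sep_manual))  # should print True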
In [59]:

    # Detector plate scale in degrees per pixel (0.135 arcsec/pixel)
    KECK_DEG_PER_PIXEL = 3.75e-5
    # Field of view spanned by a 3768-pixel axis, in degrees
    KECK_FOV = 3768 * KECK_DEG_PER_PIXEL
    # Clustering / selection threshold: two fields of view
    THRESH = KECK_FOV * 2

    clustering = AgglomerativeClustering(
        n_clusters=None, metric='precomputed',
        linkage='single', distance_threshold=THRESH,
    )
    labels = clustering.fit_predict(angular_separations_matrix)
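For scale: 3.75e-5 deg/pixel is 0.135 arcsec/pixel, so a 3768-pixel axis spans roughly 0.141 deg (about 8.5 arcmin) and the threshold is roughly 0.283 deg (about 17 arcmin). With linkage='single' on the precomputed separation matrix, any pointings closer than this threshold end up in the same cluster. A quick arithmetic check (no dataset needed):

    # Unit check of the constants above.
    deg_per_pix = 3.75e-5
    print(deg_per_pix * 3600)           # 0.135 arcsec per pixel
    print(3768 * deg_per_pix)           # ~0.1413 deg field of view
    print(3768 * deg_per_pix * 2 * 60)  # ~16.96 arcmin threshold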
In [60]:

    RA_NAME = 'ra'
    DEC_NAME = 'dec'

    # Greedily keep a row only if it is at least min_distance away
    # from every row already kept.
    def max_subset_with_min_distance(points, min_distance):
        subset = []
        for i, row in points.iterrows():
            if all(angular_separation_deg(row[RA_NAME], row[DEC_NAME],
                                          existing_point[RA_NAME], existing_point[DEC_NAME]) >= min_distance
                   for existing_point in subset):
                subset.append(row)
        return subset

    all_subsets = []

    for label in tqdm(np.unique(labels)):
        cds = df[labels == label]
        subset = max_subset_with_min_distance(cds, THRESH)
        all_subsets.extend(subset)

    print("Max subset with minimum distance:", len(all_subsets))

    locations = pd.DataFrame(all_subsets)

Output:

    100%|██████████| 137/137 [00:00<00:00, 1211.58it/s]
    Max subset with minimum distance: 137
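One way to sanity-check the selection (a sketch that assumes the cells above have been run, so locations, THRESH, and angular_separation_deg exist): recompute the pairwise separations of the kept pointings and confirm the smallest off-diagonal value is at least THRESH.

    import numpy as np

    # Pairwise separations of the selected pointings.
    sel_ra = locations['ra'].to_numpy()
    sel_dec = locations['dec'].to_numpy()
    n = len(sel_ra)

    sep = angular_separation_deg(
        np.repeat(sel_ra, n), np.repeat(sel_dec, n),
        np.tile(sel_ra, n), np.tile(sel_dec, n),
    ).reshape(n, n)

    # Ignore the zero diagonal; every remaining pair should satisfy the threshold.
    off_diag = sep[~np.eye(n, dtype=bool)]
    print(off_diag.min() >= THRESH)  # should print True if the selection worked as intended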
In [74]:

    # Move the selected files out of prelim_data/ and into data/
    for path in ["./GBI-16-2D/prelim_data/" + s.split('/')[-1] for s in locations['image']]:
        shutil.move(path, path.replace("prelim_data", "data"))
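Note that re-running this cell after the files have already been moved will raise, since the sources no longer exist in prelim_data. A slightly more defensive variant (a hypothetical refinement, not part of the notebook; it assumes locations is defined as above) skips missing files and creates the destination directory first:

    import os
    import shutil

    dst_dir = "./GBI-16-2D/data"
    os.makedirs(dst_dir, exist_ok=True)

    for src in ["./GBI-16-2D/prelim_data/" + s.split('/')[-1] for s in locations['image']]:
        if os.path.exists(src):  # skip files that were already moved
            shutil.move(src, os.path.join(dst_dir, os.path.basename(src)))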
Notebook metadata: kernel "Python 3 (ipykernel)", Python 3.10.13, nbformat 4.5.