fedric95 commited on
Commit
a37f9bf
·
verified ·
1 Parent(s): 02530cb

Upload 8 files

Browse files
Files changed (8) hide show
  1. download.py +153 -0
  2. extract.py +24 -0
  3. harmonize.py +56 -0
  4. label.py +154 -0
  5. main.py +14 -0
  6. push.py +16 -0
  7. tiling.py +46 -0
  8. utils.py +23 -0
download.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from shapely.geometry import shape
3
+ import os
4
+ import collections
5
+ from tqdm import tqdm
6
+ import pandas as pd
7
+ from multiprocessing import cpu_count
8
+ from multiprocessing.pool import ThreadPool
9
+ import requests
10
+ import networkx as nx
11
+ import rasterio as rio
12
+ from extract import extract
13
+ from tiling import get_tiles
14
+
15
+ from huggingface_hub import HfApi
16
+ from datetime import datetime
17
+ from tiling import get_tiles
18
+
19
def download_image(args):
    """Download one GEC GeoTIFF to its local path unless it already exists.

    Parameters
    ----------
    args : dict
        Record with 'image_href' (source URL), 'image' (local destination
        path), plus 'date' and 'id' used only for logging.

    Returns
    -------
    str
        The source URL, so the caller can log progress from the thread pool.
    """
    url, fn = args['image_href'], args['image']

    if not os.path.exists(fn):
        try:
            # Timeout prevents a single stalled transfer from hanging the pool;
            # raise_for_status avoids writing an HTML error page as a .tiff.
            r = requests.get(url, timeout=300)
            r.raise_for_status()
            with open(fn, 'wb') as f:
                f.write(r.content)
        except Exception as e:
            print('Exception in download_url():', e)
            return url

    # Open once to log the CRS of the downloaded raster.
    with rio.open(fn, "r") as ds:
        print(args['date'], args['id'], ds.crs.to_proj4())

    return url
35
+
36
+
37
if __name__ == '__main__':
    api = HfApi()

    image_dir = './dataset/image'

    # Extract the STAC metadata once; it is cached on disk as data.json.
    if not os.path.exists('data.json'):
        extract('data.json')

    with open('data.json') as f:
        data = json.load(f)

    # Sanity check: every set of features sharing an id must be byte-identical.
    ids = [f['id'] for f in data['features']]
    duplicated = [item for item, count in collections.Counter(ids).items() if count > 1]
    for duplicated_instance in duplicated:
        items = []
        for f in data['features']:
            if f['id'] == duplicated_instance:
                items.append(json.dumps(f))
        # All the elements that share the same id are completely identical.
        assert len(collections.Counter(items).keys()) == 1, 'Unexpected duplicated item'

    # Keep a single copy of each content-unique feature.
    data['features'] = [json.loads(f) for f in list(set([json.dumps(f) for f in data['features']]))]
    #data['features'] = data['features'][:2]

    # Flatten the features of interest into flat download records.
    records = []
    for idx in tqdm(range(len(data['features']))):
        feature = data['features'][idx]

        # Only scenes that ship a GEC (geocoded) product are usable.
        gec = feature['assets'].get('GEC')
        if gec is None:
            continue

        metadata = feature['assets']['metadata']['content']
        assert len(metadata['collects']) == 1, 'Unexpected situation'
        assert len(metadata['derivedProducts']['GEC']) == 1, 'Unexpected situation'

        # Keep acquisitions from 2024 onwards only.
        parsed_date = datetime.fromisoformat(metadata['collects'][0]['startAtUTC'])
        if parsed_date.year < 2024:
            continue

        records.append({
            'id': feature['id'],
            'date': metadata['collects'][0]['startAtUTC'],
            'bbox': feature['bbox'],
            'geometry': feature['geometry'],
            'satellite': metadata['umbraSatelliteName'],
            'track': metadata['collects'][0]['satelliteTrack'],
            'direction': metadata['collects'][0]['observationDirection'],
            'mode': metadata['imagingMode'],
            'band': metadata['collects'][0]['radarBand'],
            'polarization': metadata['collects'][0]['polarizations'],
            'azimuth_res': metadata['derivedProducts']['GEC'][0]['groundResolution']['azimuthMeters'],
            'range_res': metadata['derivedProducts']['GEC'][0]['groundResolution']['rangeMeters'],
            'rows': metadata['derivedProducts']['GEC'][0]['numRows'],
            'cols': metadata['derivedProducts']['GEC'][0]['numColumns'],
            'size': metadata['derivedProducts']['GEC'][0]['numRows']*metadata['derivedProducts']['GEC'][0]['numColumns'],
            'image_href': gec['href'],
            'image': os.path.join(image_dir, '{name}.tiff'.format(name=feature['id']))
        })

    # Download concurrently; the context manager closes the pool
    # (the original leaked it).
    cpus = cpu_count()
    with ThreadPool(cpus - 1) as pool:
        for result in pool.imap_unordered(download_image, records):
            print('url:', result)

    # Record each image's CRS; unreadable files are tagged 'None' instead of
    # being swallowed by a bare except.
    for record in records:
        try:
            with rio.open(record['image']) as src:
                record['crs'] = src.crs.to_proj4()
        except Exception:
            record['crs'] = 'None'
            print('Error reading the image')

    df = pd.DataFrame.from_records(records)
    df.to_excel('out.xlsx')

    # Tile only the images already in geographic WGS84.
    selected_records = []
    for record in records:
        if record['crs'] == '+proj=longlat +datum=WGS84 +no_defs=True':
            out_dir = 'dataset/tile/{id}'.format(id=record['id'])
            # makedirs also creates the intermediate dataset/tile directory,
            # which os.mkdir could not.
            os.makedirs(out_dir, exist_ok=True)
            selected_records.append({'input_path': record['image'], 'out_dir': out_dir, 'patch_size': 2048})
    with ThreadPool(cpus - 1) as pool:
        for result in pool.imap_unordered(get_tiles, selected_records):
            print('url:', result)

    # Publish the metadata table, the raw catalog and the dataset folder.
    api.upload_file(
        path_or_fileobj='out.xlsx',
        path_in_repo='out.xlsx',
        repo_id='fedric95/umbra',
        repo_type='dataset',
    )

    api.upload_file(
        path_or_fileobj='data.json',
        path_in_repo='data.json',
        repo_id='fedric95/umbra',
        repo_type='dataset',
    )

    api.upload_large_folder(
        repo_id='fedric95/umbra',
        repo_type='dataset',
        folder_path='./dataset/',
    )
extract.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Get the collaction of all the metadata of the umbra open data collection
3
+ """
4
+
5
+ import requests
6
+ import urllib.parse
7
+ import json
8
+ import pystac
9
+
10
def extract(out_path):
    """Crawl the Umbra open-data STAC catalog and dump it to `out_path`.

    Walks every item of the catalog, inlines the JSON content of each item's
    'metadata' asset under assets.metadata.content, and writes the resulting
    FeatureCollection as a single JSON document.
    """
    base_url = 'https://s3.us-west-2.amazonaws.com/umbra-open-data-catalog/stac/'
    home = 'catalog.json'
    url = urllib.parse.urljoin(base_url, home)

    collection = {'type': 'FeatureCollection', 'features': []}
    catalog = pystac.Catalog.from_file(url)
    # The original enumerate() index was unused; iterate items directly.
    for item in catalog.get_all_items():
        item = item.to_dict()
        # Inline the remote metadata document; a timeout keeps one dead
        # endpoint from hanging the whole crawl.
        item['assets']['metadata']['content'] = requests.get(
            item['assets']['metadata']['href'], timeout=120
        ).json()
        collection['features'].append(item)

    with open(out_path, 'w') as f:
        json.dump(collection, f)
harmonize.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Visual sanity check: download each (image, label) pair from the hub and
resample the label onto the image grid, mirroring the logic in label.py.
"""
from rasterio.enums import Resampling
from huggingface_hub import hf_hub_download
import pandas as pd
import rasterio
import matplotlib.pyplot as plt

df = pd.read_excel('out.xlsx')

# Keep only the scenes stored in geographic WGS84.
records = []
for index, row in df.iterrows():
    if row['crs'] == '+proj=longlat +datum=WGS84 +no_defs=True':
        records.append(row)

for record in records:

    hf_hub_download(
        'fedric95/umbra',
        './image/{id}.tiff'.format(id=record['id']),
        local_dir = 'dataset/',
        repo_type = 'dataset'
    )
    hf_hub_download(
        'fedric95/umbra',
        './label/{id}.tiff'.format(id=record['id']),
        local_dir = 'dataset/',
        repo_type = 'dataset'
    )

    image = './dataset/image/{id}.tiff'.format(id=record['id'])
    label = './dataset/label/{id}.tiff'.format(id=record['id'])

    with rasterio.open(image) as image_src:
        with rasterio.open(label) as label_src:

            # Resample the label to the image raster grid.
            data = label_src.read(
                out_shape=(
                    label_src.count,
                    image_src.height,
                    image_src.width
                ),
                resampling=Resampling.bilinear
            )

            # Rescale the label transform to the resampled shape.
            # NOTE(review): `transform` is computed but never used below;
            # kept to mirror label.py — confirm whether it should be applied.
            transform = label_src.transform * label_src.transform.scale(
                (label_src.width / data.shape[-1]),
                (label_src.height / data.shape[-2])
            )

            # Removed a leftover debugger breakpoint here
            # (import pdb; pdb.set_trace()) that froze every iteration.

            # Each iteration overwrites label.png with the latest scene.
            plt.imshow(data[0, :, :])
            plt.savefig('label.png')
label.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Visualize the location of the acquisition made for the umbra open data collection
3
+ """
4
+
5
+
6
+ import rasterio
7
+ import matplotlib.pyplot as plt
8
+ from sentinelhub import SHConfig, BBox, MimeType, SentinelHubRequest, DataCollection, bbox_to_dimensions, BBoxSplitter, SentinelHubDownloadClient, MosaickingOrder
9
+ from pathlib import Path
10
+
11
+
12
+ import numpy as np
13
+ import rasterio
14
+ from rasterio import features
15
+ from rasterio.mask import mask
16
+ from sentinelhub.geometry import Geometry
17
+ # the first one is your raster on the right
18
+ # and the second one your red raster
19
+ from rasterio.crs import CRS
20
+ import pandas as pd
21
+ import shutil
22
+ from multiprocessing import cpu_count
23
+ from multiprocessing.pool import ThreadPool
24
+ from huggingface_hub import HfApi
25
+ from rasterio.enums import Resampling
26
+ from huggingface_hub import hf_hub_download
27
+
28
+
29
def download_image(args):
    """Build the WorldCover label raster for one Umbra scene.

    Requests the ESA WorldCover map over the scene footprint from Sentinel
    Hub, downloads the matching Umbra image from the Hugging Face hub, and
    resamples the label onto the image grid, writing the result to
    ./dataset/label/<id>.tiff.

    Parameters
    ----------
    args : mapping with keys 'geometry', 'bbox', 'crs' and 'id'
        'geometry' and 'bbox' are *string* representations (rows read back
        from out.xlsx); 'crs' is a proj4 string.

    Returns
    -------
    The output label path on success, or the raised Exception object when the
    Sentinel Hub request fails (the caller only prints the result).
    """
    # Fixed acquisition window for the WorldCover 2021 mosaic.
    time_interval = ['2021/1/1', '2021/12/31']

    geometry = args['geometry']
    bbox = args['bbox']
    crs = args['crs']
    # Convert the stored proj4 string into a CRS string sentinelhub accepts.
    crs = CRS.from_proj4(crs).to_string()

    # NOTE(review): eval() on spreadsheet-sourced text is unsafe if out.xlsx is
    # ever edited by hand or third parties — consider ast.literal_eval.
    geometry = Geometry(eval(geometry), crs=crs)
    bbox = BBox(eval(bbox), crs=crs)
    # Pixel dimensions of the request at RESOLUTION meters (module-level constant).
    size = bbox_to_dimensions(bbox, RESOLUTION)

    try:
        request = SentinelHubRequest(
            data_folder="test_dir",
            # Module-level evalscript returning the [Map, dataMask] bands.
            evalscript=WORLDCOVER,
            input_data=[
                SentinelHubRequest.input_data(
                    # BYOC collection id — presumably the ESA WorldCover product; verify.
                    data_collection=DataCollection.define_byoc(collection_id="0b940c63-45dd-4e6b-8019-c3660b81b884"),
                    time_interval=time_interval,
                    mosaicking_order=MosaickingOrder.MOST_RECENT
                )
            ],
            responses=[SentinelHubRequest.output_response("default", MimeType.TIFF)],
            geometry=geometry,
            size=size,
            config=None,
        )
        dl_requests = [request.download_list[0]]
        _ = SentinelHubDownloadClient(config=None).download(dl_requests)
        print(crs)
    except Exception as e:
        # Best-effort: report the failing CRS and hand the exception back to
        # the caller's thread pool.
        print('Error'+crs)
        return e

    # Path of the TIFF Sentinel Hub wrote under data_folder.
    label_path = Path(request.data_folder) / request.get_filename_list()[0]

    # Fetch the corresponding Umbra image so the label can be resampled to its grid.
    hf_hub_download(
        'fedric95/umbra',
        './image/{id}.tiff'.format(id=args['id']),
        local_dir = 'dataset/',
        repo_type = 'dataset'
    )
    image_path = './dataset/image/{id}.tiff'.format(id=args['id'])
    label_out = './dataset/label/{id}.tiff'.format(id=args['id'])

    with rasterio.open(image_path) as image_src:
        with rasterio.open(label_path) as label_src:

            # Resample the label to the image's height/width so the two
            # rasters align pixel-for-pixel.
            # NOTE(review): bilinear interpolation on categorical land-cover
            # class ids blends classes — nearest would preserve them; confirm intent.
            data = label_src.read(
                out_shape=(
                    label_src.count,
                    image_src.height,
                    image_src.width
                ),
                resampling=Resampling.bilinear
            )

            # Scale the label transform to account for the new pixel size.
            transform = label_src.transform * label_src.transform.scale(
                (label_src.width / data.shape[-1]),
                (label_src.height / data.shape[-2])
            )

            # Reuse the label metadata with the updated transform/shape;
            # 0 marks nodata in the output.
            dst_kwargs = label_src.meta.copy()
            dst_kwargs.update(
                {
                    "transform": transform,
                    "width": data.shape[-1],
                    "height": data.shape[-2],
                    "nodata": 0,
                }
            )

            # Write each band of the resampled label (rasterio bands are 1-based).
            with rasterio.open(label_out, "w", **dst_kwargs) as dst:
                for i in range(data.shape[0]):
                    dst.write(data[i], i+1)
    return label_out
110
+
111
if __name__ == '__main__':

    # Evalscript executed server-side by Sentinel Hub: returns the WorldCover
    # class band plus its data mask.
    WORLDCOVER = """
//VERSION=3

// This custom script visualises WorldCover map

function setup() {
  return {
    input: ["Map", "dataMask"],
    output: {
      bands: 2,
      sampleType: "INT8"
    }
  }
}

function evaluatePixel(sample) {
  return [sample.Map, sample.dataMask];
}
"""
    # Label resolution in meters, passed to bbox_to_dimensions().
    RESOLUTION = 10

    df = pd.read_excel('out.xlsx')

    # Only scenes stored in geographic WGS84 get a label.
    records = []
    for index, row in df.iterrows():
        if row['crs'] == '+proj=longlat +datum=WGS84 +no_defs=True':
            records.append(row)

    # Build the labels concurrently; the context manager closes the pool
    # (the original leaked it).
    cpus = cpu_count()
    with ThreadPool(cpus - 1) as pool:
        for result in pool.imap_unordered(download_image, records):
            print('url:', result)

    api = HfApi()
    api.upload_folder(
        repo_id='fedric95/umbra',
        repo_type='dataset',
        folder_path='./dataset/label',
        path_in_repo='./label'
    )
main.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Visualize the location of the acquisitions made for the umbra open data collection.
"""

import json
import folium
import pandas as pd

# Load the harvested STAC feature collection.
with open('data.json') as source:
    feature_collection = json.load(source)

# Render every footprint on an interactive Leaflet map and save it as HTML.
acquisition_map = folium.Map(zoom_start=4)
folium.GeoJson(feature_collection).add_to(acquisition_map)
acquisition_map.save("index.html")
push.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Push the scene metadata table (out.xlsx) to the Hugging Face hub as a dataset.
"""
import pandas as pd
from datasets import Dataset, Image
#from utils import image_to_patches

# Dropped unused imports from the original (numpy, rasterio, os, datetime and
# the project-local `mosaic.esalulc`, which crashed the script when that
# package was not installed). NOTE(review): confirm `mosaic.esalulc` had no
# intended import-time side effects.

data = pd.read_excel('out.xlsx')
msg_ds = Dataset.from_pandas(data)
# decode=False keeps the image column as a path/bytes reference instead of
# decoding pixel data on access.
msg_ds = msg_ds.cast_column("image", Image(decode=False))
msg_ds.push_to_hub("fedric95/umbra", private=True)
tiling.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from itertools import product
3
+ import rasterio as rio
4
+ import rasterio.mask as mask
5
+ from rasterio import windows
6
+ import os
7
+
8
+
9
def split(ds, width, height):
    """Generate (window, transform) pairs tiling the raster `ds`.

    Windows step through the raster extent in `width` x `height` increments;
    windows at the right/bottom edge are clipped to the raster bounds via
    intersection, so they can be smaller than requested.
    """
    total_cols, total_rows = ds.meta['width'], ds.meta['height']
    full_extent = windows.Window(col_off=0, row_off=0, width=total_cols, height=total_rows)
    for col_start, row_start in product(range(0, total_cols, width), range(0, total_rows, height)):
        tile = windows.Window(col_off=col_start, row_off=row_start,
                              width=width, height=height).intersection(full_extent)
        yield tile, windows.transform(tile, ds.transform)
17
+
18
+
19
+
20
def get_tiles(args):
    """Cut a GeoTIFF into square tiles of `patch_size` pixels.

    Parameters
    ----------
    args : dict
        Keys: 'input_path' (source GeoTIFF), 'out_dir' (destination folder),
        'patch_size' (tile side in pixels).

    Tiles smaller than patch_size (image borders) are skipped, as are tiles
    whose output file already exists (resume-friendly).
    """
    input_path, out_dir, patch_size = args['input_path'], args['out_dir'], args['patch_size']

    # Renamed from `id`, which shadowed the builtin.
    image_id = os.path.basename(input_path).replace('.tiff', '')
    output_filename = '{id}_{col}-{row}.tiff'
    tile_width, tile_height = patch_size, patch_size

    with rio.open(input_path) as inds:

        meta = inds.meta.copy()

        for window, transform in split(inds, tile_width, tile_height):
            # Keep only full-size tiles; border remainders are discarded.
            if window.width != patch_size or window.height != patch_size:
                continue

            # Removed a leftover debug print(window) that spammed stdout.
            meta['transform'] = transform
            meta['width'], meta['height'] = window.width, window.height

            outpath = os.path.join(out_dir, output_filename.format(id=image_id, col=int(window.col_off), row=int(window.row_off)))
            if os.path.exists(outpath):
                continue
            with rio.open(outpath, 'w', **meta) as outds:
                data = inds.read(window=window)
                #out_image, out_transform = mask.mask(outds, [geometry], crop=True)
                outds.write(data)
utils.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
def image_to_patches(img, patch_height, patch_width, label=None):
    """Split a 2-D image (and optional aligned label) into non-overlapping patches.

    Fixes a SyntaxError in the original signature, where the defaulted
    parameter `label` preceded the required patch dimensions; `label` is now
    last (the function was previously unimportable, so no caller breaks).

    Parameters
    ----------
    img : 2-D array supporting .shape and 2-D slicing (e.g. numpy ndarray)
    patch_height, patch_width : int
        Patch size in pixels; border remainders smaller than a full patch
        are discarded.
    label : optional array aligned with `img`
        When given, each result entry is an (image_patch, label_patch) tuple.

    Returns
    -------
    list of patches, or list of (image_patch, label_patch) tuples when
    `label` is provided. Patches are ordered row-major (top-left first).
    """
    H, W = img.shape
    # Calculate how many whole patches fit along each dimension.
    num_patches_vert = H // patch_height
    num_patches_horz = W // patch_width

    patches = []
    for i in range(num_patches_vert):
        for j in range(num_patches_horz):
            patch_img = img[i * patch_height:(i+1) * patch_height,
                            j * patch_width:(j+1) * patch_width
                            ]
            if label is not None:
                patch_label = label[i * patch_height:(i+1) * patch_height,
                                    j * patch_width:(j+1) * patch_width
                                    ]
                patches.append((patch_img, patch_label))
            else:
                patches.append(patch_img)
    return patches