import collections
import json
import os
from datetime import datetime
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool

import pandas as pd
import rasterio as rio
import requests
from huggingface_hub import HfApi
from tqdm import tqdm

from extract import extract
from tiling import get_tiles


def download_image(args):
    """Download a single GEC image if it is not already on disk."""
    url, fn = args['image_href'], args['image']
    if not os.path.exists(fn):
        try:
            r = requests.get(url)
            with open(fn, 'wb') as f:
                f.write(r.content)
        except Exception as e:
            print('Exception in download_image():', e)
            return url
    with rio.open(fn, 'r') as ds:
        print(args['date'], args['id'], ds.crs.to_proj4())
    return url


if __name__ == '__main__':
    api = HfApi()
    image_dir = './dataset/image'
    os.makedirs(image_dir, exist_ok=True)

    if not os.path.exists('data.json'):
        extract('data.json')

    with open('data.json') as f:
        data = json.load(f)

    # Features can appear more than once: check that every group of features
    # sharing an id is completely identical, then keep only the
    # content-unique features.
    ids = [f['id'] for f in data['features']]
    duplicated = [item for item, count in collections.Counter(ids).items() if count > 1]
    for duplicated_instance in duplicated:
        items = [json.dumps(f) for f in data['features'] if f['id'] == duplicated_instance]
        assert len(collections.Counter(items).keys()) == 1, 'Unexpected duplicated item'
    data['features'] = [json.loads(f) for f in set(json.dumps(f) for f in data['features'])]
    # data['features'] = data['features'][:2]  # uncomment to test on a small subset

    records = []
    for idx in tqdm(range(len(data['features']))):
        feature = data['features'][idx]
        gec = feature['assets'].get('GEC')
        if gec is None:
            continue

        metadata = feature['assets']['metadata']['content']
        assert len(metadata['collects']) == 1, 'Unexpected situation'
        assert len(metadata['derivedProducts']['GEC']) == 1, 'Unexpected situation'

        # Keep only acquisitions from 2024 onwards.
        parsed_date = datetime.fromisoformat(metadata['collects'][0]['startAtUTC'])
        if parsed_date.year < 2024:
            continue

        collect = metadata['collects'][0]
        gec_product = metadata['derivedProducts']['GEC'][0]
        records.append({
            'id': feature['id'],
            'date': collect['startAtUTC'],
            'bbox': feature['bbox'],
            'geometry': feature['geometry'],
            'satellite': metadata['umbraSatelliteName'],
            'track': collect['satelliteTrack'],
            'direction': collect['observationDirection'],
            'mode': metadata['imagingMode'],
            'band': collect['radarBand'],
            'polarization': collect['polarizations'],
            'azimuth_res': gec_product['groundResolution']['azimuthMeters'],
            'range_res': gec_product['groundResolution']['rangeMeters'],
            'rows': gec_product['numRows'],
            'cols': gec_product['numColumns'],
            'size': gec_product['numRows'] * gec_product['numColumns'],
            'image_href': gec['href'],
            'image': os.path.join(image_dir, '{name}.tiff'.format(name=feature['id'])),
        })

    # Download the images in parallel, one worker thread per CPU minus one.
    cpus = cpu_count()
    results = ThreadPool(cpus - 1).imap_unordered(download_image, records)
    for result in results:
        print('url:', result)

    # Record each image's CRS; mark unreadable images with 'None'.
    for record in records:
        try:
            with rio.open(record['image']) as src:
                record['crs'] = src.crs.to_proj4()
        except Exception:
            record['crs'] = 'None'
            print('Error reading the image')

    df = pd.DataFrame.from_records(records)
    df.to_excel('out.xlsx')
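    # Hedged aside: the exact proj4-string match below is brittle, since the
    # formatting of to_proj4() output can vary across rasterio/PROJ versions.
    # A sketch of a more robust alternative (not what this script uses) would
    # compare CRS objects instead of strings:
    #
    #     rio.crs.CRS.from_proj4(record['crs']) == rio.crs.CRS.from_epsg(4326)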
    # Tile only the images delivered in plain WGS84 lon/lat.
    selected_records = []
    for record in records:
        if record['crs'] == '+proj=longlat +datum=WGS84 +no_defs=True':
            out_dir = 'dataset/tile/{id}'.format(id=record['id'])
            os.makedirs(out_dir, exist_ok=True)
            selected_records.append({
                'input_path': record['image'],
                'out_dir': out_dir,
                'patch_size': 2048,
            })

    # Cut the selected images into 2048x2048 patches in parallel.
    cpus = cpu_count()
    results = ThreadPool(cpus - 1).imap_unordered(get_tiles, selected_records)
    for result in results:
        print('result:', result)

    # Upload the metadata table, the raw catalogue dump, and the image/tile
    # folders to the Hugging Face Hub dataset repository.
    api.upload_file(
        path_or_fileobj='out.xlsx',
        path_in_repo='out.xlsx',
        repo_id='fedric95/umbra',
        repo_type='dataset',
    )
    api.upload_file(
        path_or_fileobj='data.json',
        path_in_repo='data.json',
        repo_id='fedric95/umbra',
        repo_type='dataset',
    )
    api.upload_large_folder(
        repo_id='fedric95/umbra',
        repo_type='dataset',
        folder_path='./dataset/',
    )
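    # Note: the uploads above require Hub authentication (e.g. via
    # `huggingface-cli login` or the HF_TOKEN environment variable), and
    # upload_large_folder is resumable, so the script can be re-run if the
    # transfer of ./dataset/ is interrupted.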