# -*- coding: utf-8 -*-
"""UrbanTreeCanopyInDurham2Dataset

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1X59zPtI7ydiX10ZnfjsNGvnKNTXgwrWs
"""

import json
import os

import datasets
import geopandas as gpd
import pandas as pd
import plotly.express as px
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Value

class UrbanTreeCanopyInDurham2Dataset(GeneratorBasedBuilder):
    """Builds the dataset by merging the Durham tree inventory CSV, shapefile, and GeoJSON sources."""

    def _info(self):
        return DatasetInfo(
            description="Urban_Tree_Canopy_in_Durham2",
            features=Features(
                {
                    "objectid": Value("int32"),
                    "streetaddr": Value("string"),
                    "city_x": Value("string"),
                    "zipcode_x": Value("string"),
                    "species_x": Value("string"),
                    "commonname_x": Value("string"),
                    "plantingda": datasets.Value("timestamp[us]"),
                    "diameterin_x": Value("float"),
                    "heightft_x": Value("float"),
                    "condition_x": Value("string"),
                    "program_x": Value("string"),
                    "matureheig": Value("float"),
                    "created_da": datasets.Value("timestamp[us]"),
                    "last_edi_1": datasets.Value("timestamp[us]"),
                    "geometry_x": Value("string"),
                    "x": Value("float"),
                    "y": Value("float"),
                    "coremoved_": Value("float"),
                    "coremove_1": Value("float"),
                    "o3removed_": Value("float"),
                    "o3remove_1": Value("float"),
                    "no2removed": Value("float"),
                    "no2remov_1": Value("float"),
                    "so2removed": Value("float"),
                    "so2remov_1": Value("float"),
                    "pm10remove": Value("float"),
                    "pm10remo_1": Value("float"),
                    "pm25remove": Value("float"),
                    "o2producti": Value("float"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/AuraMa111/Urban_Tree_Canopy_in_Durham",
            citation="A citation or reference to the source of the dataset.",
        )

    def _split_generators(self, dl_manager):
        """Downloads the CSV, shapefile, and GeoJSON sources and defines a single TRAIN split."""
        csv_url = "https://drive.google.com/uc?export=download&id=18HmgMbtbntWsvAySoZr4nV1KNu-i7GCy"
        geojson_url = "https://drive.google.com/uc?export=download&id=1jpFVanNGy7L5tVO-Z_nltbBXKvrcAoDo"

        # Extract the file ID from the SHP Google Drive sharing URL and construct a direct download link
        shp_file_id = "1DYcug0xiWYlsKZorbbEcrjZWEAB0y4MY"
        shp_url = f"https://drive.google.com/uc?export=download&id={shp_file_id}"

        # Use dl_manager to download the files
        csv_path = dl_manager.download_and_extract(csv_url)
        shp_path = dl_manager.download_and_extract(shp_url)
        geojson_path = dl_manager.download_and_extract(geojson_url)

        # Assuming the downloads are archives, download_and_extract returns their
        # extraction directories; build the full paths to the expected files inside them
        csv_file_path = os.path.join(csv_path, 'Trees_%26_Planting_Sites.csv')
        shp_file_path = os.path.join(shp_path, 'GS_TreeInventory.shp')  # Adjust if necessary
        geojson_file_path = os.path.join(geojson_path, 'Trees_%26_Planting_Sites.geojson')

        # Return a single TRAIN split that receives all three file paths
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_path": csv_file_path,
                    "shp_path": shp_file_path,
                    "geojson_path": geojson_file_path,
                },
            ),
        ]

    def _generate_examples(self, csv_path, shp_path, geojson_path):
        """Yields examples as (key, example) tuples."""

        # Load the datasets
        csv_df = pd.read_csv(csv_path)
        shp_gdf = gpd.read_file(shp_path)
        with open(geojson_path, 'r') as f:
            geojson_data = json.load(f)
        geojson_gdf = gpd.GeoDataFrame.from_features(geojson_data["features"])

        # Standardize column names
        csv_df.columns = csv_df.columns.str.lower().str.replace(' ', '_')
        shp_gdf.columns = shp_gdf.columns.str.lower().str.replace(' ', '_')
        geojson_gdf.columns = geojson_gdf.columns.str.lower().str.replace(' ', '_')

        # Convert 'objectid' to int
        csv_df['objectid'] = csv_df['objectid'].astype(int)
        shp_gdf['objectid'] = shp_gdf['objectid'].astype(int)
        geojson_gdf['objectid'] = geojson_gdf['objectid'].astype(int)

        # Merge the dataframes on 'objectid'; overlapping column names receive
        # pandas' _x/_y suffixes, which is why the selection below uses *_x names
        combined_gdf = shp_gdf.merge(csv_df, on='objectid', how='inner')
        combined_gdf = combined_gdf.merge(geojson_gdf, on='objectid', how='inner')
        combined_gdf = combined_gdf[["objectid", "streetaddr", "city_x", "zipcode_x",
                                     "species_x", "commonname_x", "plantingda", "diameterin_x",
                                     "heightft_x", "condition_x", "program_x", "matureheig",
                                     "created_da", "last_edi_1", "geometry_x",
                                     "x", "y",
                                     "coremoved_", "coremove_1",
                                     "o3removed_", "o3remove_1",
                                     "no2removed", "no2remov_1",
                                     "so2removed", "so2remov_1",
                                     "pm10remove", "pm10remo_1",
                                     "pm25remove", "o2producti"]]

        # Yield each merged row as an example, using the row index as the key
        for idx, row in combined_gdf.iterrows():
            example = row.to_dict()
            # The geometry feature is declared as a string, so cast any shapely
            # geometry object to its string (WKT-style) representation
            example["geometry_x"] = str(example["geometry_x"])
            yield idx, example

    @staticmethod
    def plot_spatial_distribution(combined_gdf, lat_col='y', lon_col='x', color_col='species_x', hover_col='species_x'):
        """Plots the trees on an interactive map, colored by species.

        Following the usual GIS convention, the 'x' column is treated as
        longitude and the 'y' column as latitude.
        """
        # Calculate the mean latitude and longitude for the center of the map
        center_lat = combined_gdf[lat_col].mean()
        center_lon = combined_gdf[lon_col].mean()

        # Create a scatter mapbox plot
        fig = px.scatter_mapbox(combined_gdf,
                                lat=lat_col,
                                lon=lon_col,
                                color=color_col,
                                hover_name=hover_col,
                                center={"lat": center_lat, "lon": center_lon},
                                zoom=10,
                                height=600,
                                width=800)

        # Set the mapbox style to "open-street-map"
        fig.update_layout(mapbox_style="open-street-map")

        # Display the figure
        fig.show()
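

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script. It assumes this
    # file is saved locally (e.g. as urban_tree_canopy_in_durham2.py, a
    # hypothetical filename) and that the Google Drive links above are still
    # reachable. Recent versions of `datasets` may also require
    # trust_remote_code=True when running a loading script.
    ds = datasets.load_dataset("urban_tree_canopy_in_durham2.py", split="train")
    print(ds)

    # The plotting helper accepts any DataFrame with the species_x / x / y
    # columns produced by _generate_examples
    UrbanTreeCanopyInDurham2Dataset.plot_spatial_distribution(ds.to_pandas())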