{ "cells": [ { "cell_type": "markdown", "id": "39bf1de3-cba6-475a-a988-ad48e5af4a04", "metadata": {}, "source": [ "# Get zonal stats " ] }, { "cell_type": "code", "execution_count": null, "id": "ba047a55-642d-4c27-a367-5f35f4406218", "metadata": {}, "outputs": [], "source": [ "import ibis\n", "import ibis.selectors as s\n", "from ibis import _\n", "import fiona\n", "import geopandas as gpd\n", "import rioxarray\n", "from shapely.geometry import box\n", "\n", "import rasterio\n", "from rasterio.mask import mask\n", "from rasterstats import zonal_stats\n", "import pandas as pd\n", "from joblib import Parallel, delayed\n", "\n", "con = ibis.duckdb.connect()\n", "con.load_extension(\"spatial\")\n", "threads = -1" ] }, { "cell_type": "code", "execution_count": null, "id": "8b5656db-2d1d-4ca8-826d-7588126e52e8", "metadata": {}, "outputs": [], "source": [ "# cropping US data to only CA \n", "def crop_raster_to_bounds(tif_file, vector_gdf):\n", " with rasterio.open(tif_file) as src:\n", " # Get California's bounding box in the same CRS as the raster\n", " california_bounds = vector_gdf.total_bounds\n", " california_bounds = rasterio.coords.BoundingBox(\n", " *california_bounds\n", " )\n", " # Crop the raster to the California bounding box\n", " out_image, out_transform = mask(src, [california_bounds], crop=True)\n", " out_meta = src.meta.copy()\n", " out_meta.update({\n", " \"driver\": \"GTiff\",\n", " \"height\": out_image.shape[1],\n", " \"width\": out_image.shape[2],\n", " \"transform\": out_transform\n", " })\n", " print(\"Unique values in cropped raster:\", np.unique(out_image))\n", "\n", " return out_image, out_meta\n" ] }, { "cell_type": "code", "execution_count": null, "id": "9a0e3446-16ac-40b0-9e34-db0157038c5a", "metadata": {}, "outputs": [], "source": [ "def big_zonal_stats(vec_file, tif_file, stats, col_name, n_jobs, verbose=10, timeout=10000):\n", " gdf = gpd.read_parquet(vec_file)\n", " if gdf.crs is None:\n", " gdf = gdf.set_crs(\"EPSG:4326\")\n", " gdf = gdf.rename(columns={\"geom\": \"geometry\"})\n", " gdf = gdf.set_geometry(\"geometry\")\n", " gdf = gdf[gdf[\"geometry\"].notna()].copy()\n", "\n", " with rasterio.open(tif_file) as src:\n", " raster_crs = src.crs\n", " gdf = gdf.to_crs(raster_crs) # Transform vector to raster CRS\n", " \n", " # CA bounding box + convert it to a polygon in raster CRS\n", " california_polygon = box(*gdf.total_bounds)\n", " \n", " out_image, out_transform = mask(src, [california_polygon], crop=True, nodata=src.nodata)\n", "\n", " # If raster is 3D, select the first band\n", " if out_image.ndim == 3:\n", " out_image = out_image[0]\n", "\n", " # compute zonal statistics for each geometry slice\n", " def get_stats(geom_slice):\n", " geom = [geom_slice.geometry]\n", " stats_result = zonal_stats(\n", " geom, out_image, stats=stats, affine=out_transform, all_touched=True, nodata=src.nodata\n", " )\n", " return stats_result[0] if stats_result and stats_result[0].get(\"mean\") is not None else {'mean': None}\n", "\n", " output = [get_stats(row) for row in gdf.itertuples()]\n", " gdf[col_name] = [res['mean'] for res in output]\n", "\n", " return gdf" ] }, { "cell_type": "code", "execution_count": null, "id": "ce66bae6-bac5-4837-9b01-fde16a00c303", "metadata": {}, "outputs": [], "source": [ "# getting local copies of data \n", "# aws s3 cp s3://vizzuality/hfp-100/hfp_2021_100m_v1-2_cog.tif . --endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://vizzuality/lg-land-carbon-data/natcrop_bii_100m_cog.tif . 
--endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://vizzuality/lg-land-carbon-data/natcrop_fii_100m_cog.tif . --endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://vizzuality/lg-land-carbon-data/natcrop_expansion_100m_cog.tif . --endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://vizzuality/lg-land-carbon-data/natcrop_reduction_100m_cog.tif . --endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://cboettig/carbon/cogs/irrecoverable_c_total_2018.tif . --endpoint-url=https://data.source.coop\n", "# aws s3 cp s3://cboettig/carbon/cogs/manageable_c_total_2018.tif . --endpoint-url=https://data.source.coop\n", "# ! aws s3 cp s3://cboettig/justice40/disadvantaged-communities.parquet . --endpoint-url=https://data.source.coop\n", "# minio/shared-biodiversity/redlist/cog/combined_sr_2022.tif\n", "# /home/rstudio/minio/shared-biodiversity/redlist/cog/combined_rwr_2022.tif\n", "# ! aws s3 cp s3://cboettig/social-vulnerability/svi2020_us_tract.parquet . --endpoint-url=https://data.source.coop\n" ] }, { "cell_type": "markdown", "id": "531e7f88-1ce1-4027-b0ab-aab597e9a2b2", "metadata": {}, "source": [ "# Biodiversity Data" ] }, { "cell_type": "code", "execution_count": null, "id": "66dec912-ad8a-41cf-a5c2-6ec9cc350984", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'SpeciesRichness_All.tif'\n", "vec_file = \"/home/rstudio/github/ca-30x30/ca2024-30m.parquet\"\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"richness\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "b081ec1a-ea91-485e-95f9-12cd06c2002a", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'RSR_All.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'],\n", " col_name = \"rsr\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")" ] }, { "cell_type": "code", "execution_count": null, "id": "d5133f36-404e-4f6a-a90b-eb5f098e6f06", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'combined_sr_2022.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"all_species_richness\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "2ce56a66-34e3-4f61-95ae-65d1f06bc468", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'combined_rwr_2022.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"all_species_rwr\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "markdown", "id": "6c129894-3775-4842-8767-f81a8f626d2c", "metadata": {}, "source": [ "# Carbon Data" ] }, { "cell_type": "code", "execution_count": null, "id": "19c3e402-8712-450f-b3dd-af9d0c01689c", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'irrecoverable_c_total_2018.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"irrecoverable_carbon\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "c55c777a-48ce-4403-a171-cfc0d2351df6", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'manageable_c_total_2018.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df 
= big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"manageable_carbon\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "33ac0fb7-2cde-448d-a634-1973e34ac14f", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'deforest_carbon_100m_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], \n", " col_name = \"deforest_carbon\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "markdown", "id": "096c00a8-57af-41d7-93cc-85d85414aa4f", "metadata": {}, "source": [ "# Human Impact Data" ] }, { "cell_type": "code", "execution_count": null, "id": "d2a8c10f-e94b-4eef-940f-2af9599edee1", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'natcrop_bii_100m_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], \n", " col_name = \"biodiversity_intactness_loss\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "1c318f39-7ca0-4f3c-80fb-73f72202e4e0", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'natcrop_fii_100m_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'],\n", " col_name = \"forest_integrity_loss\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "aef9070a-c87a-463e-81b8-3cc6c5c9d484", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'natcrop_expansion_100m_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"crop_expansion\", n_jobs=threads, verbose=0)\n", "gpd.GeoDataFrame(df, geometry=\"geometry\").to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "d94f937b-b32c-4de1-b4ac-93ce33f0919f", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'natcrop_reduction_100m_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"crop_reduction\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "6bdaba61-30c1-49d6-a4e6-db68f1daafa3", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'hfp_2021_100m_v1-2_cog.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"human_impact\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "markdown", "id": "f8e037d4-7a34-42bc-941f-0c09ee80ef3b", "metadata": {}, "source": [ "# Need to convert SVI & Justice40 files to tif" ] }, { "cell_type": "code", "execution_count": null, "id": "c4a19013-65f1-4eef-be2d-0cf1be3d0f7f", "metadata": {}, "outputs": [], "source": [ "import geopandas as gpd\n", "import numpy as np\n", "import rasterio\n", "from rasterio.features import rasterize\n", "from rasterio.transform import from_bounds\n", "\n", "def get_geotiff(gdf, output_file,col):\n", " gdf = gdf.set_geometry(\"geometry\")\n", " gdf = gdf.set_crs(\"EPSG:4326\")\n", " print(gdf.crs)\n", "\n", " # Set raster properties\n", " minx, miny, maxx, maxy = gdf.total_bounds # Get the bounds of the geometry\n", " 
pixel_size = 0.01 # Define the pixel size in units of the CRS\n", " width = int((maxx - minx) / pixel_size)\n", " height = int((maxy - miny) / pixel_size)\n", " transform = from_bounds(minx, miny, maxx, maxy, width, height)\n", " \n", " # Define rasterization with continuous values\n", " shapes = ((geom, value) for geom, value in zip(gdf.geometry, gdf[col]))\n", " raster = rasterize(\n", " shapes,\n", " out_shape=(height, width),\n", " transform=transform,\n", " fill=0.0, # Background value for areas outside the geometry\n", " dtype=\"float32\" # Set data type to handle continuous values\n", " )\n", " print(\"Unique values in raster:\", np.unique(raster))\n", "\n", " # Define GeoTIFF metadata\n", " out_meta = {\n", " \"driver\": \"GTiff\",\n", " \"height\": height,\n", " \"width\": width,\n", " \"count\": 1,\n", " \"dtype\": raster.dtype,\n", " \"crs\": gdf.crs,\n", " \"transform\": transform,\n", " \"compress\": \"deflate\" # Use compression to reduce file size\n", " }\n", " \n", " # Write to a GeoTIFF file with COG options\n", " with rasterio.open(output_file, \"w\", **out_meta) as dest:\n", " dest.write(raster, 1)\n", " dest.build_overviews([2, 4, 8, 16], rasterio.enums.Resampling.average)\n", " dest.update_tags(1, TIFFTAG_RESOLUTION_UNIT=\"Meter\")\n" ] }, { "cell_type": "markdown", "id": "f4925a74-5ed2-49a4-845b-6a0f0398a43e", "metadata": {}, "source": [ "# SVI" ] }, { "cell_type": "code", "execution_count": null, "id": "4e678f01-73af-4f99-a565-e9b7f04d9547", "metadata": {}, "outputs": [], "source": [ "# clean up SVI data\n", "svi_df = (con\n", " .read_parquet(\"svi2020_us_tract.parquet\")\n", " .select(\"RPL_THEMES\",\"RPL_THEME1\",\"RPL_THEME2\",\"RPL_THEME3\",\"RPL_THEME4\",\"Shape\")\n", " .rename(SVI = \"RPL_THEMES\", socioeconomic = \"RPL_THEME1\", \n", " household_char = \"RPL_THEME2\", racial_ethnic_minority = \"RPL_THEME3\",\n", " housing_transit = \"RPL_THEME4\", geometry = \"Shape\")\n", ".cast({\"geometry\":\"geometry\"})\n", ")\n", "svi_df.execute().to_parquet(\"svi2020_us_tract_clean.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "c5046d6b-9798-46d3-a1bc-548e29414007", "metadata": {}, "outputs": [], "source": [ "gdf = gpd.read_parquet(\"svi2020_us_tract_clean.parquet\")\n", "svi = gdf[['SVI','geometry']]\n", "socio = gdf[['socioeconomic','geometry']]\n", "house = gdf[['household_char','geometry']]\n", "minority = gdf[['racial_ethnic_minority','geometry']]\n", "transit = gdf[['housing_transit','geometry']]\n", "\n", "#convert SVI parquet to tif\n", "get_geotiff(svi,\"svi.tif\",\"SVI\")\n", "get_geotiff(socio,\"svi_socioeconomic.tif\",\"socioeconomic\")\n", "get_geotiff(house,\"svi_household.tif\",\"household_char\")\n", "get_geotiff(minority,\"svi_minority.tif\",\"racial_ethnic_minority\")\n", "get_geotiff(transit,\"svi_transit.tif\",\"housing_transit\")" ] }, { "cell_type": "code", "execution_count": null, "id": "6a36b77f-d0be-45bd-9318-da4b57eaf353", "metadata": {}, "outputs": [], "source": [ "%%time\n", "tif_file = 'svi.tif'\n", "vec_file = './cpad-stats-temp.parquet'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"SVI\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "05ef74e2-3f23-4f69-8cd3-8862cb73a259", "metadata": {}, "outputs": [], "source": [ "%%time\n", "vec_file = './cpad-stats-temp.parquet'\n", "tif_file = 'svi_socioeconomic.tif'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = 
\"socioeconomic_status\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "23417a03-38c2-4b31-8340-f08ec8a11631", "metadata": {}, "outputs": [], "source": [ "%%time\n", "vec_file = './cpad-stats-temp.parquet'\n", "tif_file = 'svi_household.tif'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"household_char\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "de86d7f0-6cdc-4d05-bdee-d9803cbd83bd", "metadata": {}, "outputs": [], "source": [ "%%time\n", "vec_file = './cpad-stats-temp.parquet'\n", "tif_file = 'svi_minority.tif'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"racial_ethnic_minority\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "0c49dd50-7dd3-4240-9af8-3e32ec656bc0", "metadata": {}, "outputs": [], "source": [ "%%time\n", "vec_file = './cpad-stats-temp.parquet'\n", "tif_file = 'svi_transit.tif'\n", "df = big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = \"housing_transit\", n_jobs=threads, verbose=0).to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "markdown", "id": "ff4b6604-9828-4882-90bd-554c21f5c6e6", "metadata": {}, "source": [ "# Justice40 " ] }, { "cell_type": "code", "execution_count": null, "id": "3678a91f-72f7-4339-a409-a97776cba043", "metadata": {}, "outputs": [], "source": [ "#clean up\n", "justice40 = (con\n", " .read_parquet(\"disadvantaged-communities.parquet\")\n", " .rename(geometry = \"SHAPE\",justice40=\"Disadvan\")\n", " .filter(_.StateName == \"California\")\n", " .mutate(geometry = _.geometry.convert(\"ESRI:102039\",\"EPSG:4326\"))\n", " .select(\"justice40\",\"geometry\")\n", " )\n", "justice40.execute().to_parquet(\"ca_justice40.parquet\")" ] }, { "cell_type": "code", "execution_count": null, "id": "8faa425f-6f9c-4189-a53a-24dd0250c539", "metadata": {}, "outputs": [], "source": [ "# #justice40 is either 0 or 1, so we want to get the percentage of polygon where justice40 = 1. 
\n", "\n", "def big_zonal_stats_binary(vec_file, justice40_file, col_name,projected_crs=\"EPSG:3310\"):\n", " # Read both vector files as GeoDataFrames\n", " gdf = gpd.read_parquet(vec_file)\n", " justice40_gdf = gpd.read_parquet(justice40_file)\n", " \n", " # Set CRS if not already set (assuming both should be in EPSG:4326, modify if needed)\n", " if gdf.crs is None:\n", " gdf = gdf.set_crs(\"EPSG:4326\")\n", " if justice40_gdf.crs is None:\n", " justice40_gdf = justice40_gdf.set_crs(\"EPSG:4326\")\n", " # Ensure both GeoDataFrames are in the same CRS and reproject to a projected CRS for area calculations\n", " gdf = gdf.to_crs(projected_crs)\n", " justice40_gdf = justice40_gdf.to_crs(projected_crs)\n", " \n", " # Ensure both GeoDataFrames are in the same CRS\n", " gdf = gdf.to_crs(justice40_gdf.crs)\n", " \n", " # Filter justice40 polygons where justice40 == 1\n", " justice40_gdf = justice40_gdf[justice40_gdf['justice40'] == 1].copy()\n", " \n", " # Prepare a list to hold percentage of justice40 == 1 for each polygon\n", " percentages = []\n", " \n", " # Iterate over each polygon in the main GeoDataFrame\n", " for geom in gdf.geometry:\n", " # Find intersecting justice40 polygons\n", " justice40_intersections = justice40_gdf[justice40_gdf.intersects(geom)].copy()\n", " \n", " # Calculate the intersection area\n", " if not justice40_intersections.empty:\n", " justice40_intersections['intersection'] = justice40_intersections.intersection(geom)\n", " total_intersection_area = justice40_intersections['intersection'].area.sum()\n", " \n", " # Calculate percentage based on original polygon's area\n", " percentage_1 = (total_intersection_area / geom.area) \n", " else:\n", " percentage_1 = 0.0 # No intersection with justice40 == 1 polygons\n", " \n", " # Append result\n", " percentages.append(percentage_1)\n", " \n", " # Add results to the original GeoDataFrame\n", " gdf[col_name] = percentages\n", " return gdf\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "fe80fc28-73ce-4a26-9925-851c2798e467", "metadata": {}, "outputs": [], "source": [ "%%time\n", "vec_file = './cpad-stats-temp.parquet'\n", "\n", "df = big_zonal_stats_binary(vec_file, \"ca_justice40.parquet\", col_name=\"percent_disadvantaged\")\n", "df.to_parquet(\"cpad-stats-temp.parquet\")\n" ] }, { "cell_type": "markdown", "id": "5438a4f4-377e-41fe-800b-8ffc1f33caa0", "metadata": {}, "source": [ "# Fire" ] }, { "cell_type": "code", "execution_count": null, "id": "4bd83b4d-01df-49d8-99e1-6740d365c833", "metadata": {}, "outputs": [], "source": [ "import geopandas as gpd\n", "\n", "#get percentage of polygon with fire occurrence \n", "def fire_stats(file_name, fire_df, col_name):\n", " gdf = gpd.read_parquet(file_name)\n", " \n", " percentages = []\n", " # Find all fires that intersect with the current protected area \n", " for geom in gdf.geometry:\n", " fire_intersections = fire_df[fire_df.intersects(geom)].copy()\n", " if not fire_intersections.empty:\n", " # If there is only one intersecting fire, compute the intersection area\n", " if len(fire_intersections) == 1:\n", " intersection_area = fire_intersections.geometry.iloc[0].intersection(geom).area\n", " else:\n", " # If there are multiple intersecting fires, use a union to avoid double-counting\n", " unioned_fires = fire_intersections.unary_union\n", " intersection_area = unioned_fires.intersection(geom).area\n", " \n", " percentage_1 = round((intersection_area / geom.area),3)\n", " else:\n", " percentage_1 = 0.0 \n", "\n", " 
percentages.append(percentage_1)\n", " \n", " gdf[col_name] = percentages\n", " return gdf\n" ] }, { "cell_type": "code", "execution_count": null, "id": "4ce35cea-8897-42c0-b1f6-01b414a5b556", "metadata": {}, "outputs": [], "source": [ "#historical fire perimeters \n", "fire_20 = (con\n", " .read_parquet(\"firep22_1.parquet\")\n", " .rename(year = \"YEAR_\")\n", " .filter(_.STATE == \"CA\", _.year != '')\n", " .cast({\"year\":\"int\"})\n", " .filter(_.year>=2003)\n", " .select(\"year\",\"geometry\")\n", " .mutate(\n", " geometry=ibis.ifelse(\n", " _.geometry.is_valid(),\n", " _.geometry, # Keep the geometry if it's valid\n", " _.geometry.buffer(0) # Apply buffer(0) to fix invalid geometries\n", " )\n", " )\n", " )\n", "fire_20.execute().to_parquet(\"ca-fire-20yrs.parquet\")\n", "fire_10 = fire_20.filter(_.year>=2013)\n", "fire_5 = fire_20.filter(_.year>=2018)\n", "fire_2 = fire_20.filter(_.year>=2022)\n", "\n", "\n", "fire_20_df = fire_20.execute().set_crs(\"EPSG:3310\")\n", "fire_10_df = fire_10.execute().set_crs(\"EPSG:3310\")\n", "fire_5_df = fire_5.execute().set_crs(\"EPSG:3310\")\n", "fire_2_df = fire_2.execute().set_crs(\"EPSG:3310\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "0a041210-6ffe-49b0-b4a7-3a9220acedb9", "metadata": {}, "outputs": [], "source": [ "#prescribed burns\n", "rxburn_20 = (con\n", " .read_parquet(\"rxburn22_1.parquet\")\n", " .rename(year = \"YEAR_\")\n", " .filter(_.STATE == \"CA\", _.year != ' ', _.year != '')\n", " .cast({\"year\":\"int\"})\n", " .filter(_.year>=2003)\n", " .select(\"year\",\"geometry\")\n", " .mutate(\n", " geometry=ibis.ifelse(\n", " _.geometry.is_valid(),\n", " _.geometry, # Keep the geometry if it's valid\n", " _.geometry.buffer(0) # Apply buffer(0) to fix invalid geometries\n", " )\n", " )\n", " )\n", "\n", "rxburn_20.execute().to_parquet(\"ca-rxburn-20yrs.parquet\")\n", "rxburn_10 = (rxburn_20.filter(_.year>=2013))\n", "rxburn_5 = (rxburn_20.filter(_.year>=2018))\n", "rxburn_2 = (rxburn_20.filter(_.year>=2022))\n", "\n", "rxburn_20_df = rxburn_20.execute().set_crs(\"EPSG:3310\")\n", "rxburn_10_df = rxburn_10.execute().set_crs(\"EPSG:3310\")\n", "rxburn_5_df = rxburn_5.execute().set_crs(\"EPSG:3310\")\n", "rxburn_2_df = rxburn_2.execute().set_crs(\"EPSG:3310\")" ] }, { "cell_type": "code", "execution_count": null, "id": "fc955b02-efc1-4ae3-b8e4-ea424d491a68", "metadata": {}, "outputs": [], "source": [ "# need to validate geometries, using epsg:3310 to match fire polygons\n", "ca = (con\n", " .read_parquet('cpad-stats-temp.parquet')\n", " .mutate(geom = _.geom.convert(\"EPSG:4326\",\"EPSG:3310\"))\n", " .mutate(\n", " geometry=ibis.ifelse(\n", " _.geom.is_valid(),\n", " _.geom, # Keep the geometry if it's valid\n", " _.geom.buffer(0) # Apply buffer(0) to fix invalid geometries\n", " )\n", " )\n", " .drop('geom')\n", " )\n", "gdf = ca.execute()\n", "gdf = gdf.set_crs('EPSG:3310')\n", "gdf.to_parquet('cpad-stats-temp-EPSG3310.parquet')\n" ] }, { "cell_type": "code", "execution_count": null, "id": "68e25266-efc8-4378-afc5-95c7a769ca81", "metadata": {}, "outputs": [], "source": [ "%%time\n", "file_name = 'cpad-stats-temp-EPSG3310.parquet'\n", "\n", "names = [\"percent_fire_20yr\", \"percent_fire_10yr\", \"percent_fire_5yr\",\n", " \"percent_fire_2yr\",\"percent_rxburn_20yr\", \"percent_rxburn_10yr\", \n", " \"percent_rxburn_5yr\",\"percent_rxburn_2yr\"]\n", "dfs = [fire_20_df,fire_10_df,fire_5_df,fire_2_df,rxburn_20_df,rxburn_10_df,rxburn_5_df,rxburn_2_df]\n", "\n", "for df,name in zip(dfs,names):\n", " df_stat = 
fire_stats(file_name,df, col_name=name)\n", " df_stat.to_parquet(file_name)" ] }, { "cell_type": "code", "execution_count": null, "id": "cd4acb35-d1a3-4632-ae30-c6e3e923e94c", "metadata": {}, "outputs": [], "source": [ "#save data back to cpad-stats-temp\n", "# (not really necessary but I want to reuse the same code)\n", "ca = (con\n", " .read_parquet(file_name)\n", " .mutate(geometry = _.geometry.convert(\"EPSG:3310\",\"EPSG:4326\"))\n", " )\n", "gdf = ca.execute()\n", "gdf= gdf.set_crs('EPSG:4326')\n", "gdf.to_parquet(\"cpad-stats-temp.parquet\")\n", "\n" ] }, { "cell_type": "markdown", "id": "e3083b85-1322-4188-ac08-e73c2570978c", "metadata": {}, "source": [ "# Cleaning up + Rounding floats" ] }, { "cell_type": "code", "execution_count": null, "id": "2e4de199-82d4-4e2b-8572-6fe19b57d1ee", "metadata": {}, "outputs": [], "source": [ "## clean up\n", "con = ibis.duckdb.connect(extensions=[\"spatial\"])\n", "ca_geom = con.read_parquet(\"ca2024-30m.parquet\").cast({\"geom\":\"geometry\"}).select(\"id\",\"geom\")\n", "\n", "ca = (con\n", " .read_parquet(\"cpad-stats-temp.parquet\")\n", " .cast({\n", " \"crop_expansion\": \"int64\",\n", " \"crop_reduction\": \"int64\",\n", " \"manageable_carbon\": \"int64\",\n", " \"irrecoverable_carbon\": \"int64\"\n", " })\n", " .mutate(\n", " richness=_.richness.round(3),\n", " rsr=_.rsr.round(3),\n", " all_species_rwr=_.all_species_rwr.round(3),\n", " all_species_richness=_.all_species_richness.round(3),\n", " percent_disadvantaged=(_.percent_disadvantaged).round(3),\n", " svi=_.svi.round(3),\n", " svi_socioeconomic_status=_.socioeconomic_status.round(3),\n", " svi_household_char=_.household_char.round(3),\n", " svi_racial_ethnic_minority=_.racial_ethnic_minority.round(3),\n", " svi_housing_transit=_.housing_transit.round(3),\n", " human_impact=_.human_impact.round(3),\n", " deforest_carbon=_.deforest_carbon.round(3),\n", " biodiversity_intactness_loss=_.biodiversity_intactness_loss.round(3),\n", " forest_integrity_loss=_.forest_integrity_loss.round(3),\n", " percent_fire_20yr = _.percent_fire_20yr.round(3),\n", " percent_fire_10yr = _.percent_fire_10yr.round(3),\n", " percent_fire_5yr = _.percent_fire_5yr.round(3),\n", " percent_fire_2yr = _.percent_fire_2yr.round(3),\n", " percent_rxburn_20yr = _.percent_rxburn_20yr.round(3),\n", " percent_rxburn_10yr = _.percent_rxburn_10yr.round(3),\n", " percent_rxburn_5yr = _.percent_rxburn_5yr.round(3),\n", " percent_rxburn_2yr = _.percent_rxburn_2yr.round(3),\n", " )\n", " # only grabbing columns we are making charts with \n", " .select('established', 'reGAP', 'name', 'access_type', 'manager', 'manager_type', 'Easement', 'Acres', 'id', 'type','richness', \n", " 'rsr', 'irrecoverable_carbon', 'manageable_carbon', 'percent_fire_20yr', 'percent_fire_10yr', 'percent_fire_5yr','percent_fire_2yr',\n", " 'percent_rxburn_20yr', 'percent_rxburn_10yr', 'percent_rxburn_5yr','percent_rxburn_2yr', 'percent_disadvantaged',\n", " 'svi', 'svi_socioeconomic_status', 'svi_household_char', 'svi_racial_ethnic_minority',\n", " 'svi_housing_transit', 'deforest_carbon','human_impact'\n", " )\n", " .join(ca_geom, \"id\", how=\"inner\")\n", " )\n", "\n", "ca.head(5).execute()\n" ] }, { "cell_type": "markdown", "id": "3780de2c-3a68-442c-bb3b-64c792418979", "metadata": {}, "source": [ "# Save as PMTiles + Upload data" ] }, { "cell_type": "code", "execution_count": null, "id": "05c791c9-888a-483a-9dbb-a2ba7eb1bce2", "metadata": {}, "outputs": [], "source": [ "import subprocess\n", "import os\n", "from huggingface_hub import HfApi, login\n", 
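"# streamlit is only used here so st.secrets can supply the Hugging Face token for login() below\n",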
"import streamlit as st\n", "\n", "login(st.secrets[\"HF_TOKEN\"])\n", "# api = HfApi(add_to_git_credential=False)\n", "api = HfApi()\n", "\n", "def hf_upload(file, repo_id,repo_type):\n", " info = api.upload_file(\n", " path_or_fileobj=file,\n", " path_in_repo=file,\n", " repo_id=repo_id,\n", " repo_type=repo_type,\n", " )\n", "def generate_pmtiles(input_file, output_file, max_zoom=12):\n", " # Ensure Tippecanoe is installed\n", " if subprocess.call([\"which\", \"tippecanoe\"], stdout=subprocess.DEVNULL) != 0:\n", " raise RuntimeError(\"Tippecanoe is not installed or not in PATH\")\n", "\n", " # Construct the Tippecanoe command\n", " command = [\n", " \"tippecanoe\",\n", " \"-o\", output_file,\n", " \"-zg\",\n", " \"--extend-zooms-if-still-dropping\",\n", " \"--force\",\n", " \"--projection\", \"EPSG:4326\", \n", " \"-L\",\"layer:\"+input_file,\n", " ]\n", " # Run Tippecanoe\n", " try:\n", " subprocess.run(command, check=True)\n", " print(f\"Successfully generated PMTiles file: {output_file}\")\n", " except subprocess.CalledProcessError as e:\n", " print(f\"Error running Tippecanoe: {e}\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "1f2d179d-6d47-4e84-83c6-7cb3d969fc00", "metadata": {}, "outputs": [], "source": [ "gdf = ca.execute().set_crs(\"EPSG:4326\")\n", "gdf.to_file(\"cpad-stats.geojson\")\n", "\n", "generate_pmtiles(\"cpad-stats.geojson\", \"cpad-stats.pmtiles\")\n", "hf_upload(\"cpad-stats.pmtiles\", \"boettiger-lab/ca-30x30\",\"dataset\")\n", "\n", "gdf.to_parquet(\"cpad-stats.parquet\")\n", "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"dataset\")\n", "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"space\")\n", "\n" ] }, { "cell_type": "markdown", "id": "09467342-c160-413b-9cdc-31a4bec968cf", "metadata": {}, "source": [ "# Redoing fire polygons pmtiles to have each range be its own layer " ] }, { "cell_type": "code", "execution_count": null, "id": "2161c50b-0328-474f-aa57-215e14fe33c2", "metadata": {}, "outputs": [], "source": [ "def generate_pmtiles(input_file1, input_file2, input_file3, input_file4, output_file, max_zoom=12):\n", " # Ensure Tippecanoe is installed\n", " if subprocess.call([\"which\", \"tippecanoe\"], stdout=subprocess.DEVNULL) != 0:\n", " raise RuntimeError(\"Tippecanoe is not installed or not in PATH\")\n", "\n", " # Construct the Tippecanoe command\n", " command = [\n", " \"tippecanoe\",\n", " \"-o\", output_file,\n", " \"-zg\",\n", " \"--extend-zooms-if-still-dropping\",\n", " \"--force\",\n", " \"--projection\", \"EPSG:4326\", \n", " \"-L\",\"layer1:\"+input_file1,\n", " \"-L\",\"layer2:\"+input_file2,\n", " \"-L\",\"layer3:\"+input_file3,\n", " \"-L\",\"layer4:\"+input_file4,\n", "\n", " ]\n", " # Run Tippecanoe\n", " try:\n", " subprocess.run(command, check=True)\n", " print(f\"Successfully generated PMTiles file: {output_file}\")\n", " except subprocess.CalledProcessError as e:\n", " print(f\"Error running Tippecanoe: {e}\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "3a15d11f-ef32-4af3-8b72-b43acd43cf08", "metadata": {}, "outputs": [], "source": [ "rxburn_20 = (con\n", " .read_parquet(\"rxburn22_1.parquet\")\n", " .rename(year = \"YEAR_\")\n", " .filter(_.STATE == \"CA\", _.year != ' ', _.year != '')\n", " .cast({\"year\":\"int\"})\n", " .filter(_.year>=2003)\n", " .mutate(\n", " geometry=ibis.ifelse(\n", " _.geometry.is_valid(),\n", " _.geometry, # Keep the geometry if it's valid\n", " _.geometry.buffer(0) # Apply buffer(0) to fix invalid geometries\n", " )\n", " )\n", " 
.mutate(geometry = _.geometry.convert(\"EPSG:3310\",\"EPSG:4326\"))\n", " )\n", "\n", "rxburn_10 = (rxburn_20.filter(_.year>=2013))\n", "rxburn_5 = (rxburn_20.filter(_.year>=2018))\n", "rxburn_2 = (rxburn_20.filter(_.year>=2022))\n", "\n", "rxburn_20_df = rxburn_20.execute().set_crs(\"EPSG:4326\").to_file(\"rxburn_20.geojson\")\n", "rxburn_10_df = rxburn_10.execute().set_crs(\"EPSG:4326\").to_file(\"rxburn_10.geojson\")\n", "rxburn_5_df = rxburn_5.execute().set_crs(\"EPSG:4326\").to_file(\"rxburn_5.geojson\")\n", "rxburn_2_df = rxburn_2.execute().set_crs(\"EPSG:4326\").to_file(\"rxburn_2.geojson\")\n", "\n", "\n", "generate_pmtiles(\"rxburn_20.geojson\",\"rxburn_10.geojson\",\"rxburn_5.geojson\",\"rxburn_2.geojson\",\"cal_rxburn_2022.pmtiles\")\n", "hf_upload(\"cal_rxburn_2022.pmtiles\", \"boettiger-lab/ca-30x30\",\"dataset\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "1220c348-c68b-4475-ba0f-ef563fea7345", "metadata": {}, "outputs": [], "source": [ "fire_20 = (con\n", " .read_parquet(\"firep22_1.parquet\")\n", " .rename(year = \"YEAR_\")\n", " .filter(_.STATE == \"CA\", _.year != '')\n", " .cast({\"year\":\"int\"})\n", " .filter(_.year>=2003)\n", " .select(\"year\",\"geometry\")\n", " .mutate(\n", " geometry=ibis.ifelse(\n", " _.geometry.is_valid(),\n", " _.geometry, # Keep the geometry if it's valid\n", " _.geometry.buffer(0) # Apply buffer(0) to fix invalid geometries\n", " )\n", " )\n", " .mutate(geometry = _.geometry.convert(\"EPSG:3310\",\"EPSG:4326\"))\n", " )\n", "\n", "fire_10 = (fire_20.filter(_.year>=2013))\n", "fire_5 = (fire_20.filter(_.year>=2018))\n", "fire_2 = (fire_20.filter(_.year>=2022))\n", "\n", "fire_20_df = fire_20.execute().set_crs(\"EPSG:4326\").to_file(\"fire_20.geojson\")\n", "fire_10_df = fire_10.execute().set_crs(\"EPSG:4326\").to_file(\"fire_10.geojson\")\n", "fire_5_df = fire_5.execute().set_crs(\"EPSG:4326\").to_file(\"fire_5.geojson\")\n", "fire_2_df = fire_2.execute().set_crs(\"EPSG:4326\").to_file(\"fire_2.geojson\")\n", "\n", "\n", "generate_pmtiles(\"fire_20.geojson\",\"fire_10.geojson\",\"fire_5.geojson\",\"fire_2.geojson\",\"cal_fire_2022.pmtiles\")\n", "hf_upload(\"cal_fire_2022.pmtiles\", \"boettiger-lab/ca-30x30\",\"dataset\")\n" ] }, { "cell_type": "markdown", "id": "41ddf636-812e-4f0d-81db-64cf80cb2d4d", "metadata": {}, "source": [ "# Renaming variables, adding new columns, etc" ] }, { "cell_type": "code", "execution_count": null, "id": "8eb85005-856f-4cc5-ba8d-e3efb24cdb32", "metadata": {}, "outputs": [], "source": [ "ca = (con\n", " .read_parquet(\"https://huggingface.co/spaces/boettiger-lab/ca-30x30/resolve/main/cpad-stats.parquet\")\n", " .rename(easement = \"Easement\")\n", " .rename(acres = \"Acres\")\n", " .drop('percent_fire_20yr', 'percent_fire_5yr','percent_fire_2yr','percent_rxburn_20yr', 'percent_rxburn_5yr','percent_rxburn_2yr')\n", " .cast({\"established\":\"str\"})\n", " .mutate(easement = _.easement.substitute({\"Easement\": \"True\", \"Fee\":\"False\"}),\n", " established = _.established.substitute({\"2023\": \"pre-2024\" }),\n", " )\n", " )" ] }, { "cell_type": "code", "execution_count": null, "id": "78eef2b6-5f34-49b6-937e-4744fd64cea8", "metadata": {}, "outputs": [], "source": [ "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"space\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "652152fd-da31-44a0-bc50-9d3aa0fe6491", "metadata": {}, "outputs": [], "source": [ "gdf = ca.execute().set_crs(\"EPSG:4326\")\n", "gdf.to_parquet(\"cpad-stats.parquet\")\n", "# 
hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"dataset\")\n", "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"space\")\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "80537a24-da0c-4016-9d8b-736bce30eb40", "metadata": {}, "outputs": [], "source": [ "gdf.to_file(\"cpad-stats.geojson\")\n", "generate_pmtiles(\"cpad-stats.geojson\",\"cpad-stats.pmtiles\")\n", "hf_upload(\"cpad-stats.pmtiles\", \"boettiger-lab/ca-30x30\",\"dataset\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "b0a5521b-8159-495b-a9a1-b78574fe2ceb", "metadata": {}, "outputs": [], "source": [ "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30-folium\",\"space\")\n" ] }, { "cell_type": "markdown", "id": "7727c253-813a-40e6-b73a-e973514606f3", "metadata": {}, "source": [ "# Rounding acres " ] }, { "cell_type": "code", "execution_count": null, "id": "9f427c9d-6b87-4bc0-a5d7-66f16a9bec77", "metadata": {}, "outputs": [], "source": [ "# foliumap tooltip looks messy so I am rounding the acres value.\n", "parquet = \"cpad-stats.parquet\"\n", "ca = (con\n", " .read_parquet(parquet)\n", " .mutate(acres = _.acres.round(4)\n", " )\n", " )\n", "\n", "gdf = ca.execute().set_crs(\"EPSG:4326\")\n", "gdf.to_parquet(\"cpad-stats.parquet\")\n", "## didn't need to upload parquet since the rounding doesn't impact this?\n", "hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"dataset\")\n", "# hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30\",\"space\")\n", "# hf_upload(\"cpad-stats.parquet\", \"boettiger-lab/ca-30x30-folium\",\"space\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "9d949c80-c572-4ee2-aa73-563c9ac5a649", "metadata": {}, "outputs": [], "source": [ "gdf.to_file(\"cpad-stats.geojson\")\n", "generate_pmtiles(\"cpad-stats.geojson\",\"cpad-stats.pmtiles\")\n", "hf_upload(\"cpad-stats.pmtiles\", \"boettiger-lab/ca-30x30\",\"dataset\")\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.7" } }, "nbformat": 4, "nbformat_minor": 5 }