update app.py
app.py
CHANGED
@@ -1,3 +1,803 @@
import streamlit as st
import json
import ee
@@ -44,7 +844,7 @@ st.markdown(
    """,
    unsafe_allow_html=True,
)
-st.write("<h2><div style='text-align: center;'>User Inputs</div></h2>", unsafe_allow_html=True)

# Authenticate and initialize Earth Engine
earthengine_credentials = os.environ.get("EE_Authentication")
@@ -73,42 +873,52 @@ if main_selection:

    # Display the selected dataset ID based on user input
    if sub_selection:
-        st.write(f"You selected: {main_selection} -> {sub_selection}")
-        st.write(f"Dataset ID: {sub_options[sub_selection]}")
-
-        # Fetch the correct dataset ID from the sub-selection
-        dataset_id = sub_options[sub_selection]

# Earth Engine Index Calculator Section
st.header("Earth Engine Index Calculator")

-index_choice = st.selectbox("Select an Index or Enter Custom Formula", ['NDVI', 'NDWI', 'Average NO₂', 'Custom Formula'])
-
-# Initialize custom_formula variable
-custom_formula = ""
-
-# Display corresponding formula based on the index selected (case-insensitive)
-if index_choice.lower() == 'ndvi':
-    st.write("Formula for NDVI: NDVI = (B8 - B4) / (B8 + B4)")
-elif index_choice.lower() == 'ndwi':
-    st.write("Formula for NDWI: NDWI = (B3 - B8) / (B3 + B8)")
-elif index_choice.lower() == 'average no₂':
-    st.write("Formula for Average NO₂: Average NO₂ = Mean(NO2 band)")
-elif index_choice.lower() == 'custom formula':
-    custom_formula = st.text_input("Enter Custom Formula (e.g., B5,B4 for two bands or B3 for one band)")
-    # Check if custom formula is empty and show warning
-    if not custom_formula:
-        st.warning("Please enter a custom formula before proceeding.")
-    else:
-        # Check if the input contains a comma (indicating two bands)
-        if ',' in custom_formula:
-            # Split the input into two bands and strip whitespace
-            band1, band2 = [b.strip() for b in custom_formula.split(',', 1)]
-            st.write(f"Custom Formula: ({band1} - {band2}) / ({band1} + {band2})")
-        else:
-            # Single band case
-            band = custom_formula.strip()
-            st.write(f"Custom Formula: {band}")

# Function to get the corresponding reducer based on user input
def get_reducer(reducer_name):
@@ -123,8 +933,6 @@ def get_reducer(reducer_name):
        'max': ee.Reducer.max(),
        'count': ee.Reducer.count(),
    }
-
-    # Default to 'mean' if the reducer_name is not recognized
    return reducers.get(reducer_name.lower(), ee.Reducer.mean())

# Streamlit selectbox for reducer choice
@@ -134,80 +942,43 @@ reducer_choice = st.selectbox(
    index=0  # Default to 'mean'
)

def convert_to_ee_geometry(geometry):
-    # Handle Shapely geometry
    if isinstance(geometry, base.BaseGeometry):
        if geometry.is_valid:
            geojson = geometry.__geo_interface__
-            print("Shapely GeoJSON:", geojson)  # Debugging: Inspect the GeoJSON structure
            return ee.Geometry(geojson)
        else:
            raise ValueError("Invalid geometry: The polygon geometry is not valid.")
-
-    # Handle GeoJSON input (string or dictionary)
    elif isinstance(geometry, dict) or isinstance(geometry, str):
        try:
            if isinstance(geometry, str):
                geometry = json.loads(geometry)
            if 'type' in geometry and 'coordinates' in geometry:
-                print("GeoJSON Geometry:", geometry)  # Debugging: Inspect the GeoJSON structure
                return ee.Geometry(geometry)
            else:
                raise ValueError("GeoJSON format is invalid.")
        except Exception as e:
            raise ValueError(f"Error parsing GeoJSON: {e}")
-
-    # Handle KML input (string or file path)
    elif isinstance(geometry, str) and geometry.lower().endswith(".kml"):
        try:
-            # Parse the KML file
            tree = ET.parse(geometry)
            kml_root = tree.getroot()
-
-            # Extract coordinates from KML geometry (assuming it's a Polygon or MultiPolygon)
-            # KML coordinates are usually within the <coordinates> tag
            kml_namespace = {'kml': 'http://www.opengis.net/kml/2.2'}
            coordinates = kml_root.findall(".//kml:coordinates", kml_namespace)
-
            if coordinates:
-                # Extract and format coordinates
                coords_text = coordinates[0].text.strip()
                coords = coords_text.split()
-                # Convert KML coordinates (comma-separated) into a list of tuples
                coords = [tuple(map(float, coord.split(','))) for coord in coords]
-                geojson = {
-                    "type": "Polygon",  # Make sure the GeoJSON type is Polygon
-                    "coordinates": [coords]  # Wrap the coordinates in a list (required by GeoJSON format)
-                }
-
-                # Debugging: Inspect the KML-to-GeoJSON structure
-                print("KML GeoJSON:", geojson)
-
                return ee.Geometry(geojson)
            else:
                raise ValueError("KML does not contain valid coordinates.")
        except Exception as e:
            raise ValueError(f"Error parsing KML: {e}")
-
    else:
        raise ValueError("Unsupported geometry input type. Supported types are Shapely, GeoJSON, and KML.")

-# Function to read points from CSV
-def read_csv(file_path):
-    df = pd.read_csv(file_path)
-    return df
-
-# Function to read points from GeoJSON
-def read_geojson(file_path):
-    gdf = gpd.read_file(file_path)
-    return gdf
-
-# Function to read points from KML
-def read_kml(file_path):
-    gdf = gpd.read_file(file_path, driver='KML')
-    return gdf
-
-
# Date Input for Start and End Dates
start_date = st.date_input("Start Date", value=pd.to_datetime('2024-11-01'))
end_date = st.date_input("End Date", value=pd.to_datetime('2024-12-01'))
@@ -219,16 +990,32 @@ end_date_str = end_date.strftime('%Y-%m-%d')
# Aggregation period selection
aggregation_period = st.selectbox("Select Aggregation Period", ["Daily", "Weekly", "Monthly", "Yearly"], index=0)

-# Ask user whether they want to process 'Point' or 'Polygon' data
shape_type = st.selectbox("Do you want to process 'Point' or 'Polygon' data?", ["Point", "Polygon"])

-# Ask user to upload a file based on shape type
file_upload = st.file_uploader(f"Upload your {shape_type} data (CSV, GeoJSON, KML)", type=["csv", "geojson", "kml"])

if file_upload is not None:
    # Read the user-uploaded file
    if shape_type.lower() == "point":
-        # Handle different file types for Point data
        if file_upload.name.endswith('.csv'):
            locations_df = pd.read_csv(file_upload)
        elif file_upload.name.endswith('.geojson'):
@@ -239,53 +1026,34 @@ if file_upload is not None:
            st.error("Unsupported file format. Please upload CSV, GeoJSON, or KML.")
            locations_df = pd.DataFrame()

-        # Check if the file contains polygons when the user selected "Point"
        if 'geometry' in locations_df.columns:
-            # Check if the geometry type is Polygon or MultiPolygon
            if locations_df.geometry.geom_type.isin(['Polygon', 'MultiPolygon']).any():
                st.warning("The uploaded file contains polygon data. Please select 'Polygon' for processing.")
-                st.stop()

-        # Processing the point data
        with st.spinner('Processing Map...'):
            if locations_df is not None and not locations_df.empty:
-                # For GeoJSON data, the coordinates are in the geometry column
                if 'geometry' in locations_df.columns:
-                    # Extract latitude and longitude from the geometry column
                    locations_df['latitude'] = locations_df['geometry'].y
                    locations_df['longitude'] = locations_df['geometry'].x

-                # Ensure the necessary columns exist in the dataframe
                if 'latitude' not in locations_df.columns or 'longitude' not in locations_df.columns:
                    st.error("Uploaded file is missing required 'latitude' or 'longitude' columns.")
                else:
-                    # Display a preview of the points data
                    st.write("Preview of the uploaded points data:")
                    st.dataframe(locations_df.head())
-
-                    # Create a LeafMap object to display the points
                    m = leafmap.Map(center=[locations_df['latitude'].mean(), locations_df['longitude'].mean()], zoom=10)
-
-                    # Add points to the map using a loop
                    for _, row in locations_df.iterrows():
                        latitude = row['latitude']
                        longitude = row['longitude']
-
-                        # Check if latitude or longitude are NaN and skip if they are
                        if pd.isna(latitude) or pd.isna(longitude):
-                            continue
-
                        m.add_marker(location=[latitude, longitude], popup=row.get('name', 'No Name'))
-
-                    # Display map
                    st.write("Map of Uploaded Points:")
                    m.to_streamlit()
-
-                    # Store the map in session_state
                    st.session_state.map_data = m

    elif shape_type.lower() == "polygon":
-        # Handle different file types for Polygon data:
        if file_upload.name.endswith('.csv'):
            locations_df = pd.read_csv(file_upload)
        elif file_upload.name.endswith('.geojson'):
@@ -296,229 +1064,170 @@ if file_upload is not None:
            st.error("Unsupported file format. Please upload CSV, GeoJSON, or KML.")
            locations_df = pd.DataFrame()

-        # Check if the file contains points when the user selected "Polygon"
        if 'geometry' in locations_df.columns:
-            # Check if the geometry type is Point or MultiPoint
            if locations_df.geometry.geom_type.isin(['Point', 'MultiPoint']).any():
                st.warning("The uploaded file contains point data. Please select 'Point' for processing.")
-                st.stop()

-        # Processing the polygon data
        with st.spinner('Processing Map...'):
            if locations_df is not None and not locations_df.empty:
-                # Ensure the 'geometry' column exists in the dataframe
                if 'geometry' not in locations_df.columns:
                    st.error("Uploaded file is missing required 'geometry' column.")
                else:
-                    # Display a preview of the polygons data
                    st.write("Preview of the uploaded polygons data:")
                    st.dataframe(locations_df.head())
-
-                    # Create a LeafMap object to display the polygons
-                    # Calculate the centroid of the polygons for the map center
                    centroid_lat = locations_df.geometry.centroid.y.mean()
                    centroid_lon = locations_df.geometry.centroid.x.mean()
-
                    m = leafmap.Map(center=[centroid_lat, centroid_lon], zoom=10)
-
-                    # Add polygons to the map using a loop
                    for _, row in locations_df.iterrows():
                        polygon = row['geometry']
-                        if polygon.is_valid:
-                            # Create a GeoDataFrame for this polygon
                            gdf = gpd.GeoDataFrame([row], geometry=[polygon], crs=locations_df.crs)
                            m.add_gdf(gdf=gdf, layer_name=row.get('name', 'Unnamed Polygon'))
-
-                    # Display map
                    st.write("Map of Uploaded Polygons:")
                    m.to_streamlit()
-
-                    # Store the map in session_state
                    st.session_state.map_data = m

-# Initialize session state for storing results
if 'results' not in st.session_state:
    st.session_state.results = []
if 'last_params' not in st.session_state:
    st.session_state.last_params = {}
if 'map_data' not in st.session_state:
-    st.session_state.map_data = None

# Function to check if parameters have changed
def parameters_changed():
    return (
        st.session_state.last_params.get('main_selection') != main_selection or
        st.session_state.last_params.get('dataset_id') != dataset_id or
-        st.session_state.last_params.get('index_choice') != index_choice or
        st.session_state.last_params.get('start_date_str') != start_date_str or
        st.session_state.last_params.get('end_date_str') != end_date_str or
        st.session_state.last_params.get('shape_type') != shape_type or
-        st.session_state.last_params.get('file_upload') != file_upload
    )

# If parameters have changed, reset the results
if parameters_changed():
-    st.session_state.results = []
    st.session_state.last_params = {
        'main_selection': main_selection,
        'dataset_id': dataset_id,
-        'index_choice': index_choice,
        'start_date_str': start_date_str,
        'end_date_str': end_date_str,
        'shape_type': shape_type,
-        'file_upload': file_upload
    }

-# Function to calculate NDVI with the selected reducer
-def calculate_ndvi(image, geometry, reducer_choice):
-    ndvi = image.normalizedDifference(['B8', 'B4']).rename('NDVI')
-    return ndvi
-
-# Function to calculate NDWI
-def calculate_ndwi(image, geometry, reducer_choice):
-    ndwi = image.normalizedDifference(['B3', 'B8']).rename('NDWI')
-    return ndwi
-
-def calculate_custom_formula(image, geometry, custom_formula, reducer_choice, scale=30):
    try:
-        if "," in custom_formula:
-            band1, band2 = [b.strip() for b in custom_formula.split(",")]
-            band_names = image.bandNames().getInfo()
-            if band1 not in band_names or band2 not in band_names:
-                raise ValueError(f"One or both bands ({band1}, {band2}) do not exist in the image.")
-            result = image.normalizedDifference([band1, band2]).rename('custom_formula')
-        else:
-            band = custom_formula.strip()
            band_names = image.bandNames().getInfo()
            if band not in band_names:
                raise ValueError(f"The band '{band}' does not exist in the image.")
-            result = image.select(band).rename('custom_formula')
-        return result
    except Exception as e:
-        return ee.Image(0).rename('custom_formula').set('error', str(e))
-
-# Modify aggregation functions to return the correct time period and aggregated results
def aggregate_data_daily(collection):
-    # Extract day from the image date (using the exact date)
    collection = collection.map(lambda image: image.set('day', ee.Date(image.get('system:time_start')).format('YYYY-MM-dd')))
-
-    # Group images by day (distinct days)
    grouped_by_day = collection.aggregate_array('day').distinct()
-
    def calculate_daily_mean(day):
-        # Filter the collection by the specific day
        daily_collection = collection.filter(ee.Filter.eq('day', day))
-        daily_mean = daily_collection.mean()
        return daily_mean.set('day', day)
-
-    # Calculate the daily mean for each day
    daily_images = ee.List(grouped_by_day.map(calculate_daily_mean))
-
    return ee.ImageCollection(daily_images)

def aggregate_data_weekly(collection):
-    # Extract the start date of the week from the image date
-    collection = collection.map(lambda image: image.set(
-        'week_start', ee.Date(image.get('system:time_start'))
-        .advance(-ee.Date(image.get('system:time_start')).getRelative('day', 'week'), 'day')
-    ))
-    # Group images by week start date
    grouped_by_week = collection.aggregate_array('week_start').distinct()
-
    def calculate_weekly_mean(week_start):
-        # Filter the collection by the specific week start date
        weekly_collection = collection.filter(ee.Filter.eq('week_start', week_start))
-        weekly_mean = weekly_collection.mean()
        return weekly_mean.set('week_start', week_start)
-
-    # Calculate the weekly mean for each week
    weekly_images = ee.List(grouped_by_week.map(calculate_weekly_mean))
    return ee.ImageCollection(weekly_images)
-
def aggregate_data_monthly(collection, start_date, end_date):
-    # Filter the collection for the specific date range
    collection = collection.filterDate(start_date, end_date)
-
-    # Extract month and year from the image date
    collection = collection.map(lambda image: image.set('month', ee.Date(image.get('system:time_start')).format('YYYY-MM')))
-
-    # Group images by month
    grouped_by_month = collection.aggregate_array('month').distinct()
-
    def calculate_monthly_mean(month):
        monthly_collection = collection.filter(ee.Filter.eq('month', month))
        monthly_mean = monthly_collection.mean()
        return monthly_mean.set('month', month)
-
-    # Calculate the monthly mean for each month
    monthly_images = ee.List(grouped_by_month.map(calculate_monthly_mean))
-
    return ee.ImageCollection(monthly_images)
-
def aggregate_data_yearly(collection):
-    # Extract year from the image date
    collection = collection.map(lambda image: image.set('year', ee.Date(image.get('system:time_start')).format('YYYY')))
-
-    # Group images by year
    grouped_by_year = collection.aggregate_array('year').distinct()
-
    def calculate_yearly_mean(year):
        yearly_collection = collection.filter(ee.Filter.eq('year', year))
        yearly_mean = yearly_collection.mean()
        return yearly_mean.set('year', year)
-
-    # Calculate the yearly mean for each year
    yearly_images = ee.List(grouped_by_year.map(calculate_yearly_mean))
-
    return ee.ImageCollection(yearly_images)

-# Function to calculate index based on the selected choice
-def calculate_index_for_period(image, roi, index_choice, reducer_choice, custom_formula):
-    if index_choice.lower() == 'ndvi':
-        return calculate_ndvi(image, roi, reducer_choice)
-    elif index_choice.lower() == 'ndwi':
-        return calculate_ndwi(image, roi, reducer_choice)
-    elif index_choice.lower() == 'average no₂':
-        mean_no2 = image.select('NO2').mean().rename('Average NO₂')
-        return mean_no2
-    elif index_choice.lower() == 'custom formula':
-        # Pass the custom formula here, not the index_choice
-        return calculate_custom_formula(image, roi, custom_formula, reducer_choice)
-    else:
-        st.write("Please Select any one option...." + index_choice.lower())
-
-def aggregate_data_weekly(collection):
-    def set_week_start(image):
-        # Get the image timestamp
-        date = ee.Date(image.get('system:time_start'))
-        # Calculate days since the start of the week (0 = Monday, 6 = Sunday)
-        days_since_week_start = date.getRelative('day', 'week')
-        # Convert to ee.Number and negate it to get the offset to the week start
-        offset = ee.Number(days_since_week_start).multiply(-1)
-        # Advance the date by the negative offset to get the week start
-        week_start = date.advance(offset, 'day')
-        return image.set('week_start', week_start.format('YYYY-MM-dd'))  # Ensure string format
-
-    # Apply the week start calculation to each image
-    collection = collection.map(set_week_start)
-
-    # Group images by week start date
-    grouped_by_week = collection.aggregate_array('week_start').distinct()
-
-    def calculate_weekly_mean(week_start):
-        # Filter the collection by the specific week start date
-        weekly_collection = collection.filter(ee.Filter.eq('week_start', week_start))
-        weekly_mean = weekly_collection.mean()  # Calculate mean for the week
-        return weekly_mean.set('week_start', week_start)
-
-    # Calculate the weekly mean for each week
-    weekly_images = ee.List(grouped_by_week.map(calculate_weekly_mean))
-    return ee.ImageCollection(weekly_images)
-
-def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id, index_choice, reducer_choice, shape_type, aggregation_period, custom_formula=""):
    aggregated_results = []

-    if index_choice.lower() == 'custom_formula' and not custom_formula:
        st.error("Custom formula cannot be empty. Please provide a formula.")
        return aggregated_results

@@ -536,13 +1245,23 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                continue

            location_name = row.get('name', f"Location_{idx}")
-            roi = ee.Geometry.Point([longitude, latitude])

            collection = ee.ImageCollection(dataset_id) \
                .filterDate(ee.Date(start_date_str), ee.Date(end_date_str)) \
                .filterBounds(roi)

-            # Aggregate data based on the selected period
            if aggregation_period.lower() == 'daily':
                collection = aggregate_data_daily(collection)
            elif aggregation_period.lower() == 'weekly':
@@ -552,9 +1271,8 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
            elif aggregation_period.lower() == 'yearly':
                collection = aggregate_data_yearly(collection)

-            # Process each image in the collection
            image_list = collection.toList(collection.size())
-            processed_weeks = set()
            for i in range(image_list.size().getInfo()):
                image = ee.Image(image_list.get(i))

@@ -565,8 +1283,7 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                elif aggregation_period.lower() == 'weekly':
                    timestamp = image.get('week_start')
                    period_label = 'Week'
-                    date = ee.String(timestamp).getInfo()
-                    # Skip if week is outside the date range or already processed
                    if (pd.to_datetime(date) < pd.to_datetime(start_date_str) or
                        pd.to_datetime(date) > pd.to_datetime(end_date_str) or
                        date in processed_weeks):
@@ -581,14 +1298,14 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                    period_label = 'Year'
                    date = ee.Date(timestamp).format('YYYY').getInfo()

-                index_image = calculate_index_for_period(image, roi, index_choice, reducer_choice, custom_formula)

                try:
                    index_value = index_image.reduceRegion(
                        reducer=get_reducer(reducer_choice),
                        geometry=roi,
                        scale=30
-                    ).get(index_image.bandNames().get(0))

                    calculated_value = index_value.getInfo()

@@ -619,6 +1336,9 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,

                try:
                    roi = convert_to_ee_geometry(polygon_geometry)
                except ValueError as e:
                    st.warning(f"Skipping invalid polygon {polygon_name}: {e}")
                    continue
@@ -627,7 +1347,6 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                .filterDate(ee.Date(start_date_str), ee.Date(end_date_str)) \
                .filterBounds(roi)

-            # Aggregate data based on the selected period
            if aggregation_period.lower() == 'daily':
                collection = aggregate_data_daily(collection)
            elif aggregation_period.lower() == 'weekly':
@@ -637,9 +1356,8 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
            elif aggregation_period.lower() == 'yearly':
                collection = aggregate_data_yearly(collection)

-            # Process each image in the collection
            image_list = collection.toList(collection.size())
-            processed_weeks = set()
            for i in range(image_list.size().getInfo()):
                image = ee.Image(image_list.get(i))

@@ -650,8 +1368,7 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                elif aggregation_period.lower() == 'weekly':
                    timestamp = image.get('week_start')
                    period_label = 'Week'
-                    date = ee.String(timestamp).getInfo()
-                    # Skip if week is outside the date range or already processed
                    if (pd.to_datetime(date) < pd.to_datetime(start_date_str) or
                        pd.to_datetime(date) > pd.to_datetime(end_date_str) or
                        date in processed_weeks):
@@ -666,14 +1383,14 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
                    period_label = 'Year'
                    date = ee.Date(timestamp).format('YYYY').getInfo()

-                index_image = calculate_index_for_period(image, roi, index_choice, reducer_choice, custom_formula)

                try:
                    index_value = index_image.reduceRegion(
                        reducer=get_reducer(reducer_choice),
                        geometry=roi,
                        scale=30
-                    ).get(index_image.bandNames().get(0))

                    calculated_value = index_value.getInfo()

@@ -694,29 +1411,9 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
            progress_bar.progress(progress_percentage)
            progress_text.markdown(f"Processing: {int(progress_percentage * 100)}%")

-    # if aggregated_results:
-    #     result_df = pd.DataFrame(aggregated_results)
-    #     if aggregation_period.lower() == 'daily':
-    #         aggregated_output = result_df.groupby('Location Name').agg({
-    #             'Latitude': 'first' if shape_type.lower() == 'point' else None,
-    #             'Longitude': 'first' if shape_type.lower() == 'point' else None,
-    #             'Start Date': 'first',
-    #             'End Date': 'first',
-    #             'Calculated Value': 'mean'
-    #         }).reset_index()
-    #         # Remove None columns (for polygons)
-    #         aggregated_output = aggregated_output[[col for col in aggregated_output.columns if col is not None]]
-    #         aggregated_output.rename(columns={'Calculated Value': 'Aggregated Value'}, inplace=True)
-    #         return aggregated_output.to_dict(orient='records')
-    #     else:
-    #         return result_df.to_dict(orient='records')
-
-    # return []
-
    if aggregated_results:
        result_df = pd.DataFrame(aggregated_results)
        if aggregation_period.lower() == 'daily':
-            # Define aggregation dictionary based on shape_type
            agg_dict = {
                'Start Date': 'first',
                'End Date': 'first',
@@ -725,57 +1422,29 @@ def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id,
            if shape_type.lower() == 'point':
                agg_dict['Latitude'] = 'first'
                agg_dict['Longitude'] = 'first'
-
            aggregated_output = result_df.groupby('Location Name').agg(agg_dict).reset_index()
            aggregated_output.rename(columns={'Calculated Value': 'Aggregated Value'}, inplace=True)
            return aggregated_output.to_dict(orient='records')
        else:
            return result_df.to_dict(orient='records')
-
    return []
-
-#
-if st.button(f"Calculate ({index_choice})"):
    if file_upload is not None:
-        if shape_type.lower() == "point":
-            results = process_aggregation(
-                locations_df,
-                start_date_str,
-                end_date_str,
-                dataset_id,
-                index_choice,
-                reducer_choice,
-                shape_type,
-                aggregation_period,
-                custom_formula
-            )
-            if results:
-                result_df = pd.DataFrame(results)
-                st.write(f"Processed Results Table ({aggregation_period}):")
-                st.dataframe(result_df)
-                filename = f"{main_selection}_{dataset_id}_{start_date.strftime('%Y/%m/%d')}_{end_date.strftime('%Y/%m/%d')}_{aggregation_period.lower()}.csv"
-                st.download_button(
-                    label="Download results as CSV",
-                    data=result_df.to_csv(index=False).encode('utf-8'),
-                    file_name=filename,
-                    mime='text/csv'
-                )
-                st.spinner('')
-                st.success('Processing complete!')
-            else:
-                st.warning("No results were generated.")
-
-        elif shape_type.lower() == "polygon":
            results = process_aggregation(
                locations_df,
                start_date_str,
                end_date_str,
                dataset_id,
-                index_choice,
                reducer_choice,
                shape_type,
                aggregation_period,
-                custom_formula
            )
            if results:
                result_df = pd.DataFrame(results)
@@ -792,6 +1461,5 @@ if st.button(f"Calculate ({index_choice})"):
            st.success('Processing complete!')
        else:
            st.warning("No results were generated.")
-
    else:
        st.warning("Please upload a file.")
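
For reference, the removed custom-formula handling above treats a comma-separated pair of bands as a normalized difference and a single token as a plain band selection. Below is a minimal pure-Python sketch of just that parsing step, outside Streamlit and Earth Engine; the helper name is hypothetical:

def describe_custom_formula(custom_formula: str) -> str:
    # Two comma-separated bands -> normalized difference; a single band -> direct selection.
    if ',' in custom_formula:
        band1, band2 = [b.strip() for b in custom_formula.split(',', 1)]
        return f"({band1} - {band2}) / ({band1} + {band2})"
    return custom_formula.strip()

print(describe_custom_formula("B8,B4"))  # (B8 - B4) / (B8 + B4)
print(describe_custom_formula("B3"))     # B3
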
+# (new file, lines 1-631: the entire previous implementation shown in the hunks above, repeated verbatim with every line commented out)
|
632 |
+
# collection = aggregate_data_daily(collection)
|
633 |
+
# elif aggregation_period.lower() == 'weekly':
|
634 |
+
# collection = aggregate_data_weekly(collection)
|
635 |
+
# elif aggregation_period.lower() == 'monthly':
|
636 |
+
# collection = aggregate_data_monthly(collection, start_date_str, end_date_str)
|
637 |
+
# elif aggregation_period.lower() == 'yearly':
|
638 |
+
# collection = aggregate_data_yearly(collection)
|
639 |
+
|
640 |
+
# # Process each image in the collection
|
641 |
+
# image_list = collection.toList(collection.size())
|
642 |
+
# processed_weeks = set() # Track processed weeks to avoid duplicates
|
643 |
+
# for i in range(image_list.size().getInfo()):
|
644 |
+
# image = ee.Image(image_list.get(i))
|
645 |
+
|
646 |
+
# if aggregation_period.lower() == 'daily':
|
647 |
+
# timestamp = image.get('day')
|
648 |
+
# period_label = 'Date'
|
649 |
+
# date = ee.Date(timestamp).format('YYYY-MM-dd').getInfo()
|
650 |
+
# elif aggregation_period.lower() == 'weekly':
|
651 |
+
# timestamp = image.get('week_start')
|
652 |
+
# period_label = 'Week'
|
653 |
+
# date = ee.String(timestamp).getInfo() # Already formatted as YYYY-MM-dd
|
654 |
+
# # Skip if week is outside the date range or already processed
|
655 |
+
# if (pd.to_datetime(date) < pd.to_datetime(start_date_str) or
|
656 |
+
# pd.to_datetime(date) > pd.to_datetime(end_date_str) or
|
657 |
+
# date in processed_weeks):
|
658 |
+
# continue
|
659 |
+
# processed_weeks.add(date)
|
660 |
+
# elif aggregation_period.lower() == 'monthly':
|
661 |
+
# timestamp = image.get('month')
|
662 |
+
# period_label = 'Month'
|
663 |
+
# date = ee.Date(timestamp).format('YYYY-MM').getInfo()
|
664 |
+
# elif aggregation_period.lower() == 'yearly':
|
665 |
+
# timestamp = image.get('year')
|
666 |
+
# period_label = 'Year'
|
667 |
+
# date = ee.Date(timestamp).format('YYYY').getInfo()
|
668 |
+
|
669 |
+
# index_image = calculate_index_for_period(image, roi, index_choice, reducer_choice, custom_formula)
|
670 |
+
|
671 |
+
# try:
|
672 |
+
# index_value = index_image.reduceRegion(
|
673 |
+
# reducer=get_reducer(reducer_choice),
|
674 |
+
# geometry=roi,
|
675 |
+
# scale=30
|
676 |
+
# ).get(index_image.bandNames().get(0))
|
677 |
+
|
678 |
+
# calculated_value = index_value.getInfo()
|
679 |
+
|
680 |
+
# if isinstance(calculated_value, (int, float)):
|
681 |
+
# aggregated_results.append({
|
682 |
+
# 'Location Name': location_name,
|
683 |
+
# period_label: date,
|
684 |
+
# 'Start Date': start_date_str,
|
685 |
+
# 'End Date': end_date_str,
|
686 |
+
# 'Calculated Value': calculated_value
|
687 |
+
# })
|
688 |
+
# else:
|
689 |
+
# st.warning(f"Skipping invalid value for {location_name} on {date}")
|
690 |
+
# except Exception as e:
|
691 |
+
# st.error(f"Error retrieving value for {location_name}: {e}")
|
692 |
+
|
693 |
+
# progress_percentage = (idx + 1) / total_steps
|
694 |
+
# progress_bar.progress(progress_percentage)
|
695 |
+
# progress_text.markdown(f"Processing: {int(progress_percentage * 100)}%")
|
696 |
+
|
697 |
+
# # if aggregated_results:
|
698 |
+
# # result_df = pd.DataFrame(aggregated_results)
|
699 |
+
# # if aggregation_period.lower() == 'daily':
|
700 |
+
# # aggregated_output = result_df.groupby('Location Name').agg({
|
701 |
+
# # 'Latitude': 'first' if shape_type.lower() == 'point' else None,
|
702 |
+
# # 'Longitude': 'first' if shape_type.lower() == 'point' else None,
|
703 |
+
# # 'Start Date': 'first',
|
704 |
+
# # 'End Date': 'first',
|
705 |
+
# # 'Calculated Value': 'mean'
|
706 |
+
# # }).reset_index()
|
707 |
+
# # # Remove None columns (for polygons)
|
708 |
+
# # aggregated_output = aggregated_output[[col for col in aggregated_output.columns if col is not None]]
|
709 |
+
# # aggregated_output.rename(columns={'Calculated Value': 'Aggregated Value'}, inplace=True)
|
710 |
+
# # return aggregated_output.to_dict(orient='records')
|
711 |
+
# # else:
|
712 |
+
# # return result_df.to_dict(orient='records')
|
713 |
+
|
714 |
+
# # return []
|
715 |
+
|
716 |
+
# if aggregated_results:
|
717 |
+
# result_df = pd.DataFrame(aggregated_results)
|
718 |
+
# if aggregation_period.lower() == 'daily':
|
719 |
+
# # Define aggregation dictionary based on shape_type
|
720 |
+
# agg_dict = {
|
721 |
+
# 'Start Date': 'first',
|
722 |
+
# 'End Date': 'first',
|
723 |
+
# 'Calculated Value': 'mean'
|
724 |
+
# }
|
725 |
+
# if shape_type.lower() == 'point':
|
726 |
+
# agg_dict['Latitude'] = 'first'
|
727 |
+
# agg_dict['Longitude'] = 'first'
|
728 |
+
|
729 |
+
# aggregated_output = result_df.groupby('Location Name').agg(agg_dict).reset_index()
|
730 |
+
# aggregated_output.rename(columns={'Calculated Value': 'Aggregated Value'}, inplace=True)
|
731 |
+
# return aggregated_output.to_dict(orient='records')
|
732 |
+
# else:
|
733 |
+
# return result_df.to_dict(orient='records')
|
734 |
+
|
735 |
+
# return []
|
736 |
+
|
737 |
+
# # When the user clicks the process button, start the calculation
|
738 |
+
# if st.button(f"Calculate ({index_choice})"):
|
739 |
+
# if file_upload is not None:
|
740 |
+
# if shape_type.lower() == "point":
|
741 |
+
# results = process_aggregation(
|
742 |
+
# locations_df,
|
743 |
+
# start_date_str,
|
744 |
+
# end_date_str,
|
745 |
+
# dataset_id,
|
746 |
+
# index_choice,
|
747 |
+
# reducer_choice,
|
748 |
+
# shape_type,
|
749 |
+
# aggregation_period,
|
750 |
+
# custom_formula
|
751 |
+
# )
|
752 |
+
# if results:
|
753 |
+
# result_df = pd.DataFrame(results)
|
754 |
+
# st.write(f"Processed Results Table ({aggregation_period}):")
|
755 |
+
# st.dataframe(result_df)
|
756 |
+
# filename = f"{main_selection}_{dataset_id}_{start_date.strftime('%Y/%m/%d')}_{end_date.strftime('%Y/%m/%d')}_{aggregation_period.lower()}.csv"
|
757 |
+
# st.download_button(
|
758 |
+
# label="Download results as CSV",
|
759 |
+
# data=result_df.to_csv(index=False).encode('utf-8'),
|
760 |
+
# file_name=filename,
|
761 |
+
# mime='text/csv'
|
762 |
+
# )
|
763 |
+
# st.spinner('')
|
764 |
+
# st.success('Processing complete!')
|
765 |
+
# else:
|
766 |
+
# st.warning("No results were generated.")
|
767 |
+
|
768 |
+
# elif shape_type.lower() == "polygon":
|
769 |
+
# results = process_aggregation(
|
770 |
+
# locations_df,
|
771 |
+
# start_date_str,
|
772 |
+
# end_date_str,
|
773 |
+
# dataset_id,
|
774 |
+
# index_choice,
|
775 |
+
# reducer_choice,
|
776 |
+
# shape_type,
|
777 |
+
# aggregation_period,
|
778 |
+
# custom_formula
|
779 |
+
# )
|
780 |
+
# if results:
|
781 |
+
# result_df = pd.DataFrame(results)
|
782 |
+
# st.write(f"Processed Results Table ({aggregation_period}):")
|
783 |
+
# st.dataframe(result_df)
|
784 |
+
# filename = f"{main_selection}_{dataset_id}_{start_date.strftime('%Y/%m/%d')}_{end_date.strftime('%Y/%m/%d')}_{aggregation_period.lower()}.csv"
|
785 |
+
# st.download_button(
|
786 |
+
# label="Download results as CSV",
|
787 |
+
# data=result_df.to_csv(index=False).encode('utf-8'),
|
788 |
+
# file_name=filename,
|
789 |
+
# mime='text/csv'
|
790 |
+
# )
|
791 |
+
# st.spinner('')
|
792 |
+
# st.success('Processing complete!')
|
793 |
+
# else:
|
794 |
+
# st.warning("No results were generated.")
|
795 |
+
|
796 |
+
# else:
|
797 |
+
# st.warning("Please upload a file.")
|
798 |
+
|
799 |
+
|
800 |
+
|
801 |
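# NOTE: the commented-out block above is the earlier index_choice-based implementation
# (weekly aggregation, process_aggregation and the Calculate button). It is superseded by
# the selected_bands / custom_formula version defined further down in this file.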
import streamlit as st
import json
import ee
# ...
    """,
    unsafe_allow_html=True,
)
st.write("<h2><div style='text-align: center;'>User Inputs</div></h2>", unsafe_allow_html=True)

# Authenticate and initialize Earth Engine
earthengine_credentials = os.environ.get("EE_Authentication")
# ...

# Display the selected dataset ID based on user input
if sub_selection:
    st.write(f"You selected: {main_selection} -> {sub_options[sub_selection]}")
    st.write(f"Dataset ID: {sub_selection}")
    dataset_id = sub_selection  # Use the key directly as the dataset ID

# Earth Engine Index Calculator Section
st.header("Earth Engine Index Calculator")

# Load band information based on selected dataset
if main_selection and sub_selection:
    dataset_bands = data[main_selection]["bands"].get(sub_selection, [])
    st.write(f"Available Bands for {sub_options[sub_selection]}: {', '.join(dataset_bands)}")

    # Allow user to select 1 or 2 bands
    selected_bands = st.multiselect(
        "Select 1 or 2 Bands for Calculation",
        options=dataset_bands,
        default=[dataset_bands[0]] if dataset_bands else [],
        help="Select at least 1 band and up to 2 bands."
    )

    # Ensure minimum 1 and maximum 2 bands are selected
    if len(selected_bands) < 1:
        st.warning("Please select at least one band.")
        st.stop()
    elif len(selected_bands) > 2:
        st.warning("You can select a maximum of 2 bands.")
        st.stop()

    # Show custom formula input if bands are selected
    if selected_bands:
        default_formula = (
            f"{selected_bands[0]}" if len(selected_bands) == 1
            else f"({selected_bands[0]} - {selected_bands[1]}) / ({selected_bands[0]} + {selected_bands[1]})"
        )
        custom_formula = st.text_input(
            "Enter Custom Formula (e.g., 'B3*B5/2' or '(B8 - B4) / (B8 + B4)')",
            value=default_formula,
            help=f"Use {', '.join(selected_bands)} in your formula. Example: 'B3*B5/2'"
        )

        if not custom_formula:
            st.warning("Please enter a custom formula to proceed.")
            st.stop()

        # Display the formula
        st.write(f"Custom Formula: {custom_formula}")
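# Illustrative examples (not part of the app logic): with one selected band such as 'B4'
# the default formula is just 'B4'; with two bands such as 'B8' and 'B4' the default is the
# normalized difference '(B8 - B4) / (B8 + B4)', which for Sentinel-2-style band names is an
# NDVI-like index. Any arithmetic over the selected band names, e.g. 'B3*B5/2', is accepted
# by the text input above.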
# Function to get the corresponding reducer based on user input
def get_reducer(reducer_name):
    # ...
        'max': ee.Reducer.max(),
        'count': ee.Reducer.count(),
    }
    return reducers.get(reducer_name.lower(), ee.Reducer.mean())

# Streamlit selectbox for reducer choice
# ...
    index=0  # Default to 'mean'
)
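# Usage sketch (illustrative): get_reducer('max') returns ee.Reducer.max(), while an
# unrecognised name such as 'foo' falls back to ee.Reducer.mean() through the .get()
# default in the return statement above. The full reducer mapping sits in the lines
# elided from this diff.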
# Function to convert geometry to Earth Engine format
def convert_to_ee_geometry(geometry):
    if isinstance(geometry, base.BaseGeometry):
        if geometry.is_valid:
            geojson = geometry.__geo_interface__
            return ee.Geometry(geojson)
        else:
            raise ValueError("Invalid geometry: The polygon geometry is not valid.")
    elif isinstance(geometry, dict) or isinstance(geometry, str):
        try:
            if isinstance(geometry, str):
                geometry = json.loads(geometry)
            if 'type' in geometry and 'coordinates' in geometry:
                return ee.Geometry(geometry)
            else:
                raise ValueError("GeoJSON format is invalid.")
        except Exception as e:
            raise ValueError(f"Error parsing GeoJSON: {e}")
    elif isinstance(geometry, str) and geometry.lower().endswith(".kml"):
        try:
            tree = ET.parse(geometry)
            kml_root = tree.getroot()
            kml_namespace = {'kml': 'http://www.opengis.net/kml/2.2'}
            coordinates = kml_root.findall(".//kml:coordinates", kml_namespace)
            if coordinates:
                coords_text = coordinates[0].text.strip()
                coords = coords_text.split()
                coords = [tuple(map(float, coord.split(','))) for coord in coords]
                geojson = {"type": "Polygon", "coordinates": [coords]}
                return ee.Geometry(geojson)
            else:
                raise ValueError("KML does not contain valid coordinates.")
        except Exception as e:
            raise ValueError(f"Error parsing KML: {e}")
    else:
        raise ValueError("Unsupported geometry input type. Supported types are Shapely, GeoJSON, and KML.")
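# Usage sketch for convert_to_ee_geometry (illustrative only; the names below are
# hypothetical and not used elsewhere in the app):
#
#     from shapely.geometry import Polygon
#     square = Polygon([(75.0, 26.0), (75.1, 26.0), (75.1, 26.1), (75.0, 26.1)])
#     roi_from_shapely = convert_to_ee_geometry(square)                     # Shapely branch
#     roi_from_geojson = convert_to_ee_geometry(square.__geo_interface__)   # GeoJSON dict branch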
# Date Input for Start and End Dates
start_date = st.date_input("Start Date", value=pd.to_datetime('2024-11-01'))
end_date = st.date_input("End Date", value=pd.to_datetime('2024-12-01'))
# ...

# Aggregation period selection
aggregation_period = st.selectbox("Select Aggregation Period", ["Daily", "Weekly", "Monthly", "Yearly"], index=0)

# Ask user whether they want to process 'Point' or 'Polygon' data
shape_type = st.selectbox("Do you want to process 'Point' or 'Polygon' data?", ["Point", "Polygon"])

# Ask user to upload a file based on shape type
file_upload = st.file_uploader(f"Upload your {shape_type} data (CSV, GeoJSON, KML)", type=["csv", "geojson", "kml"])

# Additional options based on shape type
kernel_size = None
include_boundary = None
if shape_type.lower() == "point":
    kernel_size = st.selectbox(
        "Select Calculation Area",
        ["Point", "3x3 Kernel", "5x5 Kernel"],
        index=0,
        help="Choose 'Point' for exact point calculation, or a kernel size for area averaging."
    )
elif shape_type.lower() == "polygon":
    include_boundary = st.checkbox(
        "Include Boundary Pixels",
        value=True,
        help="Check to include pixels on the polygon boundary; uncheck to exclude them."
    )
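# Expected upload layout (illustrative): for 'Point' data a CSV needs at least 'latitude'
# and 'longitude' columns, with an optional 'name' column used for labels, e.g.
#
#     name,latitude,longitude
#     Site_A,26.90,75.80        <- placeholder values, not real site data
#
# For 'Polygon' data the file must provide a 'geometry' column (GeoJSON/KML uploads do).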
if file_upload is not None:
    # Read the user-uploaded file
    if shape_type.lower() == "point":
        if file_upload.name.endswith('.csv'):
            locations_df = pd.read_csv(file_upload)
        elif file_upload.name.endswith('.geojson'):
        # ...
            st.error("Unsupported file format. Please upload CSV, GeoJSON, or KML.")
            locations_df = pd.DataFrame()

        if 'geometry' in locations_df.columns:
            if locations_df.geometry.geom_type.isin(['Polygon', 'MultiPolygon']).any():
                st.warning("The uploaded file contains polygon data. Please select 'Polygon' for processing.")
                st.stop()

        with st.spinner('Processing Map...'):
            if locations_df is not None and not locations_df.empty:
                if 'geometry' in locations_df.columns:
                    locations_df['latitude'] = locations_df['geometry'].y
                    locations_df['longitude'] = locations_df['geometry'].x

                if 'latitude' not in locations_df.columns or 'longitude' not in locations_df.columns:
                    st.error("Uploaded file is missing required 'latitude' or 'longitude' columns.")
                else:
                    st.write("Preview of the uploaded points data:")
                    st.dataframe(locations_df.head())
                    m = leafmap.Map(center=[locations_df['latitude'].mean(), locations_df['longitude'].mean()], zoom=10)
                    for _, row in locations_df.iterrows():
                        latitude = row['latitude']
                        longitude = row['longitude']
                        if pd.isna(latitude) or pd.isna(longitude):
                            continue
                        m.add_marker(location=[latitude, longitude], popup=row.get('name', 'No Name'))
                    st.write("Map of Uploaded Points:")
                    m.to_streamlit()
                    st.session_state.map_data = m
    elif shape_type.lower() == "polygon":
        if file_upload.name.endswith('.csv'):
            locations_df = pd.read_csv(file_upload)
        elif file_upload.name.endswith('.geojson'):
        # ...
            st.error("Unsupported file format. Please upload CSV, GeoJSON, or KML.")
            locations_df = pd.DataFrame()

        if 'geometry' in locations_df.columns:
            if locations_df.geometry.geom_type.isin(['Point', 'MultiPoint']).any():
                st.warning("The uploaded file contains point data. Please select 'Point' for processing.")
                st.stop()

        with st.spinner('Processing Map...'):
            if locations_df is not None and not locations_df.empty:
                if 'geometry' not in locations_df.columns:
                    st.error("Uploaded file is missing required 'geometry' column.")
                else:
                    st.write("Preview of the uploaded polygons data:")
                    st.dataframe(locations_df.head())
                    centroid_lat = locations_df.geometry.centroid.y.mean()
                    centroid_lon = locations_df.geometry.centroid.x.mean()
                    m = leafmap.Map(center=[centroid_lat, centroid_lon], zoom=10)
                    for _, row in locations_df.iterrows():
                        polygon = row['geometry']
                        if polygon.is_valid:
                            gdf = gpd.GeoDataFrame([row], geometry=[polygon], crs=locations_df.crs)
                            m.add_gdf(gdf=gdf, layer_name=row.get('name', 'Unnamed Polygon'))
                    st.write("Map of Uploaded Polygons:")
                    m.to_streamlit()
                    st.session_state.map_data = m
# Initialize session state for storing results
if 'results' not in st.session_state:
    st.session_state.results = []
if 'last_params' not in st.session_state:
    st.session_state.last_params = {}
if 'map_data' not in st.session_state:
    st.session_state.map_data = None

# Function to check if parameters have changed
def parameters_changed():
    return (
        st.session_state.last_params.get('main_selection') != main_selection or
        st.session_state.last_params.get('dataset_id') != dataset_id or
        st.session_state.last_params.get('selected_bands') != selected_bands or
        st.session_state.last_params.get('custom_formula') != custom_formula or
        st.session_state.last_params.get('start_date_str') != start_date_str or
        st.session_state.last_params.get('end_date_str') != end_date_str or
        st.session_state.last_params.get('shape_type') != shape_type or
        st.session_state.last_params.get('file_upload') != file_upload or
        st.session_state.last_params.get('kernel_size') != kernel_size or
        st.session_state.last_params.get('include_boundary') != include_boundary
    )

# If parameters have changed, reset the results
if parameters_changed():
    st.session_state.results = []
    st.session_state.last_params = {
        'main_selection': main_selection,
        'dataset_id': dataset_id,
        'selected_bands': selected_bands,
        'custom_formula': custom_formula,
        'start_date_str': start_date_str,
        'end_date_str': end_date_str,
        'shape_type': shape_type,
        'file_upload': file_upload,
        'kernel_size': kernel_size,
        'include_boundary': include_boundary
    }

# Function to calculate custom formula using eval safely
def calculate_custom_formula(image, geometry, selected_bands, custom_formula, reducer_choice, scale=30):
    try:
        band_values = {}
        for band in selected_bands:
            band_names = image.bandNames().getInfo()
            if band not in band_names:
                raise ValueError(f"The band '{band}' does not exist in the image.")
            band_values[band] = image.select(band)

        reducer = get_reducer(reducer_choice)
        reduced_values = {}
        for band in selected_bands:
            reduced_value = band_values[band].reduceRegion(
                reducer=reducer,
                geometry=geometry,
                scale=scale
            ).get(band).getInfo()
            if reduced_value is None:
                reduced_value = 0
            reduced_values[band] = float(reduced_value)

        formula = custom_formula
        for band in selected_bands:
            formula = formula.replace(band, str(reduced_values[band]))

        result = eval(formula, {"__builtins__": {}}, reduced_values)
        if not isinstance(result, (int, float)):
            raise ValueError("Formula evaluation did not result in a numeric value.")
        return ee.Image.constant(result).rename('custom_result')

    except ZeroDivisionError:
        st.error("Error: Division by zero occurred in the formula.")
        return ee.Image(0).rename('custom_result').set('error', 'Division by zero')
    except SyntaxError:
        st.error(f"Error: Invalid formula syntax in '{custom_formula}'.")
        return ee.Image(0).rename('custom_result').set('error', 'Invalid syntax')
    except ValueError as e:
        st.error(f"Error: {str(e)}")
        return ee.Image(0).rename('custom_result').set('error', str(e))
    except Exception as e:
        st.error(f"Unexpected error evaluating formula: {e}")
        return ee.Image(0).rename('custom_result').set('error', str(e))

# Function to calculate index for a period
def calculate_index_for_period(image, roi, selected_bands, custom_formula, reducer_choice):
    return calculate_custom_formula(image, roi, selected_bands, custom_formula, reducer_choice)

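# Design note: calculate_custom_formula() above reduces each band to a single number first
# and then evaluates the formula client-side with eval(); the result is wrapped back into a
# constant ee.Image named 'custom_result'. A server-side alternative (sketch only, assuming
# two selected bands named 'B8' and 'B4'; this is not what the app currently does) would be:
#
#     index_image = image.expression(
#         '(B8 - B4) / (B8 + B4)',
#         {'B8': image.select('B8'), 'B4': image.select('B4')}
#     ).rename('custom_result')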
# Aggregation functions
def aggregate_data_daily(collection):
    collection = collection.map(lambda image: image.set('day', ee.Date(image.get('system:time_start')).format('YYYY-MM-dd')))
    grouped_by_day = collection.aggregate_array('day').distinct()
    def calculate_daily_mean(day):
        daily_collection = collection.filter(ee.Filter.eq('day', day))
        daily_mean = daily_collection.mean()
        return daily_mean.set('day', day)
    daily_images = ee.List(grouped_by_day.map(calculate_daily_mean))
    return ee.ImageCollection(daily_images)

def aggregate_data_weekly(collection):
    def set_week_start(image):
        date = ee.Date(image.get('system:time_start'))
        days_since_week_start = date.getRelative('day', 'week')
        offset = ee.Number(days_since_week_start).multiply(-1)
        week_start = date.advance(offset, 'day')
        return image.set('week_start', week_start.format('YYYY-MM-dd'))
    collection = collection.map(set_week_start)
    grouped_by_week = collection.aggregate_array('week_start').distinct()
    def calculate_weekly_mean(week_start):
        weekly_collection = collection.filter(ee.Filter.eq('week_start', week_start))
        weekly_mean = weekly_collection.mean()
        return weekly_mean.set('week_start', week_start)
    weekly_images = ee.List(grouped_by_week.map(calculate_weekly_mean))
    return ee.ImageCollection(weekly_images)

def aggregate_data_monthly(collection, start_date, end_date):
    collection = collection.filterDate(start_date, end_date)
    collection = collection.map(lambda image: image.set('month', ee.Date(image.get('system:time_start')).format('YYYY-MM')))
    grouped_by_month = collection.aggregate_array('month').distinct()
    def calculate_monthly_mean(month):
        monthly_collection = collection.filter(ee.Filter.eq('month', month))
        monthly_mean = monthly_collection.mean()
        return monthly_mean.set('month', month)
    monthly_images = ee.List(grouped_by_month.map(calculate_monthly_mean))
    return ee.ImageCollection(monthly_images)

def aggregate_data_yearly(collection):
    collection = collection.map(lambda image: image.set('year', ee.Date(image.get('system:time_start')).format('YYYY')))
    grouped_by_year = collection.aggregate_array('year').distinct()
    def calculate_yearly_mean(year):
        yearly_collection = collection.filter(ee.Filter.eq('year', year))
        yearly_mean = yearly_collection.mean()
        return yearly_mean.set('year', year)
    yearly_images = ee.List(grouped_by_year.map(calculate_yearly_mean))
    return ee.ImageCollection(yearly_images)

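# How the aggregation helpers behave (illustrative): each image is tagged with a period label
# ('day', 'week_start', 'month' or 'year') derived from 'system:time_start', and all images
# sharing a label are averaged with .mean(). For example, two scenes acquired on 2024-11-03
# would collapse into a single image whose 'day' property is '2024-11-03'. The weekly helper
# shifts each date back by getRelative('day', 'week') days, so every image in the same
# calendar week (as Earth Engine defines week boundaries) receives the same 'week_start' label.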
# Process aggregation function with kernel and boundary options
def process_aggregation(locations_df, start_date_str, end_date_str, dataset_id, selected_bands, reducer_choice, shape_type, aggregation_period, custom_formula="", kernel_size=None, include_boundary=None):
    aggregated_results = []

    if not custom_formula:
        st.error("Custom formula cannot be empty. Please provide a formula.")
        return aggregated_results

    # ...
                    continue

                location_name = row.get('name', f"Location_{idx}")

                # Define the region of interest based on kernel size
                if kernel_size == "3x3 Kernel":
                    # Assuming 30m resolution, 3x3 kernel = 90m x 90m
                    buffer_size = 45  # Half of 90m to center the square
                    roi = ee.Geometry.Point([longitude, latitude]).buffer(buffer_size).bounds()
                elif kernel_size == "5x5 Kernel":
                    # 5x5 kernel = 150m x 150m
                    buffer_size = 75  # Half of 150m
                    roi = ee.Geometry.Point([longitude, latitude]).buffer(buffer_size).bounds()
                else:  # Point
                    roi = ee.Geometry.Point([longitude, latitude])

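                # Sketch of the kernel geometry above (assuming ~30 m pixels, as the comments state):
                # a 3x3 window spans about 90 m x 90 m, so buffering the point by 45 m and taking
                # .bounds() yields a square of roughly that size; a 5x5 window spans ~150 m, hence
                # the 75 m buffer. With kernel_size == "Point" the exact point geometry is used.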
                collection = ee.ImageCollection(dataset_id) \
                    .filterDate(ee.Date(start_date_str), ee.Date(end_date_str)) \
                    .filterBounds(roi)

                if aggregation_period.lower() == 'daily':
                    collection = aggregate_data_daily(collection)
                elif aggregation_period.lower() == 'weekly':
                    collection = aggregate_data_weekly(collection)
                elif aggregation_period.lower() == 'monthly':
                    collection = aggregate_data_monthly(collection, start_date_str, end_date_str)
                elif aggregation_period.lower() == 'yearly':
                    collection = aggregate_data_yearly(collection)

                image_list = collection.toList(collection.size())
                processed_weeks = set()
                for i in range(image_list.size().getInfo()):
                    image = ee.Image(image_list.get(i))

                    if aggregation_period.lower() == 'daily':
                        timestamp = image.get('day')
                        period_label = 'Date'
                        date = ee.Date(timestamp).format('YYYY-MM-dd').getInfo()
                    elif aggregation_period.lower() == 'weekly':
                        timestamp = image.get('week_start')
                        period_label = 'Week'
                        date = ee.String(timestamp).getInfo()
                        if (pd.to_datetime(date) < pd.to_datetime(start_date_str) or
                                pd.to_datetime(date) > pd.to_datetime(end_date_str) or
                                date in processed_weeks):
                            continue
                        processed_weeks.add(date)
                    elif aggregation_period.lower() == 'monthly':
                        timestamp = image.get('month')
                        period_label = 'Month'
                        date = ee.Date(timestamp).format('YYYY-MM').getInfo()
                    elif aggregation_period.lower() == 'yearly':
                        timestamp = image.get('year')
                        period_label = 'Year'
                        date = ee.Date(timestamp).format('YYYY').getInfo()

                    index_image = calculate_index_for_period(image, roi, selected_bands, custom_formula, reducer_choice)

                    try:
                        index_value = index_image.reduceRegion(
                            reducer=get_reducer(reducer_choice),
                            geometry=roi,
                            scale=30
                        ).get('custom_result')

                        calculated_value = index_value.getInfo()

            # ...

                try:
                    roi = convert_to_ee_geometry(polygon_geometry)
                    if not include_boundary:
                        # Erode the polygon by a small buffer (e.g., 1 pixel = 30m) to exclude boundary
                        roi = roi.buffer(-30).bounds()
                except ValueError as e:
                    st.warning(f"Skipping invalid polygon {polygon_name}: {e}")
                    continue

                collection = ee.ImageCollection(dataset_id) \
                    .filterDate(ee.Date(start_date_str), ee.Date(end_date_str)) \
                    .filterBounds(roi)

                if aggregation_period.lower() == 'daily':
                    collection = aggregate_data_daily(collection)
                elif aggregation_period.lower() == 'weekly':
                    collection = aggregate_data_weekly(collection)
                elif aggregation_period.lower() == 'monthly':
                    collection = aggregate_data_monthly(collection, start_date_str, end_date_str)
                elif aggregation_period.lower() == 'yearly':
                    collection = aggregate_data_yearly(collection)

                image_list = collection.toList(collection.size())
                processed_weeks = set()
                for i in range(image_list.size().getInfo()):
                    image = ee.Image(image_list.get(i))

                    if aggregation_period.lower() == 'daily':
                        timestamp = image.get('day')
                        period_label = 'Date'
                        date = ee.Date(timestamp).format('YYYY-MM-dd').getInfo()
                    elif aggregation_period.lower() == 'weekly':
                        timestamp = image.get('week_start')
                        period_label = 'Week'
                        date = ee.String(timestamp).getInfo()
                        if (pd.to_datetime(date) < pd.to_datetime(start_date_str) or
                                pd.to_datetime(date) > pd.to_datetime(end_date_str) or
                                date in processed_weeks):
                            continue
                        processed_weeks.add(date)
                    elif aggregation_period.lower() == 'monthly':
                        timestamp = image.get('month')
                        period_label = 'Month'
                        date = ee.Date(timestamp).format('YYYY-MM').getInfo()
                    elif aggregation_period.lower() == 'yearly':
                        timestamp = image.get('year')
                        period_label = 'Year'
                        date = ee.Date(timestamp).format('YYYY').getInfo()

                    index_image = calculate_index_for_period(image, roi, selected_bands, custom_formula, reducer_choice)

                    try:
                        index_value = index_image.reduceRegion(
                            reducer=get_reducer(reducer_choice),
                            geometry=roi,
                            scale=30
                        ).get('custom_result')

                        calculated_value = index_value.getInfo()

            # ...
                progress_bar.progress(progress_percentage)
                progress_text.markdown(f"Processing: {int(progress_percentage * 100)}%")

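    # Shape of each aggregated_results record (based on the column names used below and in the
    # earlier, commented-out implementation): 'Location Name', the period column ('Date', 'Week',
    # 'Month' or 'Year'), 'Start Date', 'End Date' and 'Calculated Value', plus
    # 'Latitude'/'Longitude' for point data. The daily branch below then averages
    # 'Calculated Value' per location.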
    if aggregated_results:
        result_df = pd.DataFrame(aggregated_results)
        if aggregation_period.lower() == 'daily':
            agg_dict = {
                'Start Date': 'first',
                'End Date': 'first',
                'Calculated Value': 'mean'
            }
            if shape_type.lower() == 'point':
                agg_dict['Latitude'] = 'first'
                agg_dict['Longitude'] = 'first'
            aggregated_output = result_df.groupby('Location Name').agg(agg_dict).reset_index()
            aggregated_output.rename(columns={'Calculated Value': 'Aggregated Value'}, inplace=True)
            return aggregated_output.to_dict(orient='records')
        else:
            return result_df.to_dict(orient='records')
    return []

# Button to trigger calculation
if st.button("Calculate"):
    if file_upload is not None:
        if shape_type.lower() in ["point", "polygon"]:
            results = process_aggregation(
                locations_df,
                start_date_str,
                end_date_str,
                dataset_id,
                selected_bands,
                reducer_choice,
                shape_type,
                aggregation_period,
                custom_formula,
                kernel_size=kernel_size,
                include_boundary=include_boundary
            )
            if results:
                result_df = pd.DataFrame(results)
                # ...
                st.success('Processing complete!')
            else:
                st.warning("No results were generated.")
    else:
        st.warning("Please upload a file.")