Dataset schema:

  code       string, lengths 26 to 870k
  docstring  string, lengths 1 to 65.6k
  func_name  string, lengths 1 to 194
  language   string, 1 class
  repo       string, lengths 8 to 68
  path       string, lengths 5 to 194
  url        string, lengths 46 to 254
  license    string, 4 classes
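For orientation, a minimal sketch of reading rows with this schema into a DataFrame; the file name code_docstrings.jsonl and the JSON Lines format are assumptions, not something stated by the dump above.

import pandas as pd

# Hypothetical export of the rows below; the file name is an assumption.
df = pd.read_json("code_docstrings.jsonl", lines=True)

# Each record carries the eight columns listed in the schema.
for _, row in df.head(3).iterrows():
    print(row["repo"], row["path"], row["func_name"])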
def save_context(self, text, t_text):
    """Save the current translation pair to context"""
    if not self.context_flag:
        return

    self.context_list.append(text)
    self.context_translated_list.append(t_text)

    # Keep only the most recent paragraphs within the limit
    if len(self.context_list) > self.context_paragraph_limit:
        self.context_list.pop(0)
        self.context_translated_list.pop(0)
Save the current translation pair to context
save_context
python
yihong0618/bilingual_book_maker
book_maker/translator/claude_translator.py
https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/translator/claude_translator.py
MIT
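As a usage note, save_context keeps a bounded sliding window of the most recent source/translation pairs. A minimal, self-contained sketch of that behaviour follows; the ContextWindow class and its attribute defaults are illustrative assumptions, not the translator's actual constructor.

# Sliding-window sketch mirroring save_context above; names and limits are illustrative.
class ContextWindow:
    def __init__(self, limit=2):
        self.context_flag = True
        self.context_paragraph_limit = limit
        self.context_list = []
        self.context_translated_list = []

    def save_context(self, text, t_text):
        if not self.context_flag:
            return
        self.context_list.append(text)
        self.context_translated_list.append(t_text)
        if len(self.context_list) > self.context_paragraph_limit:
            self.context_list.pop(0)
            self.context_translated_list.pop(0)

w = ContextWindow(limit=2)
for i in range(4):
    w.save_context(f"src {i}", f"dst {i}")
print(w.context_list)  # ['src 2', 'src 3'] -- only the most recent pairs survive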
def scan(self):
    """
    Scan keyboard matrix and save key event into the queue.

    :return: length of the key event queue.
    """
    t = time.monotonic_ns()

    # use local variables to speed up
    pressed = self.pressed
    last_mask = self.mask
    cols = self.cols

    mask = 0
    count = 0
    key_index = -1
    for row in self.rows:
        row.value = pressed  # select row
        for col in cols:
            key_index += 1
            if col.value == pressed:
                key_mask = 1 << key_index
                if not (last_mask & key_mask):
                    if t - self.t1[key_index] < self._debounce_time:
                        print("debounce")
                        continue
                    self.t0[key_index] = t
                    self.put(key_index)
                mask |= key_mask
                count += 1
            elif last_mask and (last_mask & (1 << key_index)):
                if t - self.t0[key_index] < self._debounce_time:
                    print("debounce")
                    mask |= 1 << key_index
                    continue
                self.t1[key_index] = t
                self.put(0x80 | key_index)
        row.value = not pressed
    self.mask = mask
    self.count = count

    return self.length
Scan keyboard matrix and save key event into the queue. :return: length of the key event queue.
scan
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def wait(self, timeout=1000):
    """Wait for a new key event or timeout"""
    last = self.length
    if timeout:
        end_time = time.monotonic_ns() + timeout * 1000000
        while True:
            n = self.scan()
            if n > last or time.monotonic_ns() > end_time:
                return n
    else:
        while True:
            n = self.scan()
            if n > last:
                return n
Wait for a new key event or timeout
wait
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def put(self, data):
    """Put a key event into the queue"""
    self.queue[self.head] = data
    self.head += 1
    if self.head >= self.keys:
        self.head = 0
    self.length += 1
Put a key event into the queue
put
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def get(self):
    """Remove and return the first event from the queue."""
    data = self.queue[self.tail]
    self.tail += 1
    if self.tail >= self.keys:
        self.tail = 0
    self.length -= 1
    return data
Remove and return the first event from the queue.
get
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
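put() and get() above implement a fixed-size ring buffer over self.queue, with head/tail wrapping at self.keys and the high bit (0x80) marking key-release events. A stand-alone sketch of the same index arithmetic, with an illustrative queue size:

# Minimal ring buffer mirroring Matrix.put()/get(); sizes and event values are illustrative.
class EventQueue:
    def __init__(self, keys=4):
        self.keys = keys
        self.queue = [0] * keys
        self.head = 0    # next write slot
        self.tail = 0    # next read slot
        self.length = 0

    def put(self, data):
        self.queue[self.head] = data
        self.head = (self.head + 1) % self.keys  # same wrap as "if head >= keys: head = 0"
        self.length += 1

    def get(self):
        data = self.queue[self.tail]
        self.tail = (self.tail + 1) % self.keys
        self.length -= 1
        return data

q = EventQueue(keys=4)
q.put(3)          # key 3 pressed
q.put(0x80 | 3)   # key 3 released (high bit set)
print(q.get(), hex(q.get()))  # 3 0x83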
def view(self, n):
    """Return the specified event"""
    return self.queue[(self.tail + n) % self.keys]
Return the specified event
view
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def __getitem__(self, n):
    """Return the specified event"""
    return self.queue[(self.tail + n) % self.keys]
Return the specified event
__getitem__
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def __len__(self):
    """Return the number of events in the queue"""
    return self.length
Return the number of events in the queue
__len__
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def get_keydown_time(self, key):
    """Return the key pressed time"""
    return self.t0[key]
Return the key pressed time
get_keydown_time
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def get_keyup_time(self, key):
    """Return the key released time"""
    return self.t1[key]
Return the key released time
get_keyup_time
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def time(self):
    """Return current time"""
    return time.monotonic_ns()
Return current time
time
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def ms(self, t):
    """Convert time to milliseconds"""
    return t // 1000000
Convert time to milliseconds
ms
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def debounce_time(self, t):
    """Set debounce time"""
    self._debounce_time = t * 1000000
Set debounce time
debounce_time
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def suspend(self):
    """Suspend keyboard"""
    pass
Suspend keyboard
suspend
python
makerdiary/python-keyboard
keyboard/matrix.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/matrix.py
MIT
def mem(r0):
    """Read memory from the address"""
    ldr(r0, [r0, 0])
Read memory from the address
mem
python
makerdiary/python-keyboard
keyboard/util.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/util.py
MIT
def get_key_sequence_info(self, start, end):
    """Get the info from a sequence of key events"""
    matrix = self.matrix
    event = matrix.view(start - 1)
    key = event & 0x7F
    desc = key_name(key)
    if event < 0x80:
        desc += " \\ "
        t0 = matrix.get_keydown_time(key)
    else:
        desc += " / "
        t0 = matrix.get_keyup_time(key)

    t = []
    for i in range(start, end):
        event = matrix.view(i)
        key = event & 0x7F
        desc += key_name(key)
        if event < 0x80:
            desc += " \\ "
            t1 = matrix.get_keydown_time(key)
        else:
            desc += " / "
            t1 = matrix.get_keyup_time(key)

        dt = matrix.ms(t1 - t0)
        t0 = t1
        t.append(dt)
    return desc, t
Get the info from a sequence of key events
get_key_sequence_info
python
makerdiary/python-keyboard
keyboard/__init__.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/__init__.py
MIT
def is_tapping_key(self, key):
    """Check if the key is tapped (press & release quickly)"""
    matrix = self.matrix
    n = len(matrix)
    if n == 0:
        n = matrix.wait(
            self.tap_delay - matrix.ms(matrix.time() - matrix.get_keydown_time(key))
        )
    target = key | 0x80
    if n >= 1:
        new_key = matrix.view(0)
        if new_key == target:
            return True
        if new_key >= 0x80:
            # Fast Typing - B is a tap-key
            #    A↓      B↓      A↑      B↑
            # --+-------+-------+-------+------> t
            #           |  dt1  |
            # dt1 < tap_delay
            if self.verbose:
                desc, t = self.get_key_sequence_info(-1, n)
                print(desc)
                print(t)
            return True

        if n == 1:
            n = matrix.wait(
                self.fast_type_thresh
                - matrix.ms(matrix.time() - matrix.get_keydown_time(new_key))
            )
        if n < 2:
            return False

        if target == matrix.view(1):
            # Fast Typing - B is a tap-key
            #    B↓      C↓      B↑      C↑
            # --+-------+-------+-------+------> t
            #   |  dt1  |  dt2  |
            # dt1 < tap_delay && dt2 < fast_type_thresh
            if self.verbose:
                desc, t = self.get_key_sequence_info(-1, n)
                print(desc)
                print(t)
            return True

    if self.verbose:
        desc, t = self.get_key_sequence_info(-1, n)
        print(desc)
        print(t)
    return False
Check if the key is tapped (press & release quickly)
is_tapping_key
python
makerdiary/python-keyboard
keyboard/__init__.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/__init__.py
MIT
def pixel(self, i, r, g, b):
    """Set the pixel. It takes effect after calling update()"""
    row = i >> 4  # i // 16
    col = i & 15  # i % 16
    offset = row * 48 + col
    self.pixels[offset] = g
    self.pixels[offset + 16] = r
    self.pixels[offset + 32] = b
Set the pixel. It takes effect after calling update()
pixel
python
makerdiary/python-keyboard
keyboard/model/is32fl3733.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/model/is32fl3733.py
MIT
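pixel() maps a linear LED index to a 16-column grid and writes G, R and B into three 16-byte planes per row. A short worked example of that index arithmetic (the LED index 18 is arbitrary):

i = 18
row = i >> 4              # 18 // 16 == 1
col = i & 15              # 18 % 16  == 2
offset = row * 48 + col   # 50
print(offset, offset + 16, offset + 32)  # 50 66 82 -> G, R, B slots for LED 18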
def update_pixel(self, i, r, g, b):
    """Set the pixel and update"""
    row = i >> 4  # i // 16
    col = i & 15  # i % 16
    offset = row * 48 + col
    self.pixels[offset] = g
    self.pixels[offset + 16] = r
    self.pixels[offset + 32] = b
    self.power.value = 1
    self.page(1)
    self.write(offset, g)
    self.write(offset + 16, r)
    self.write(offset + 32, b)
    if not self.any():
        self.power.value = 0
Set the pixel and update
update_pixel
python
makerdiary/python-keyboard
keyboard/model/is32fl3733.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/model/is32fl3733.py
MIT
def any(self):
    """Check if any pixel is not zero"""
    if self.mode_mask > 0:
        return True
    for pixel in self.pixels:
        if pixel > 0:
            return True
    return False
Check if any pixel is not zero
any
python
makerdiary/python-keyboard
keyboard/model/is32fl3733.py
https://github.com/makerdiary/python-keyboard/blob/master/keyboard/model/is32fl3733.py
MIT
def load_jupyter_server_extension(nbapp):
    """serve the streamlit app"""
    Popen(
        [
            "streamlit",
            "run",
            "Home.py",
            "--browser.serverAddress=0.0.0.0",
            "--server.enableCORS=False",
        ]
    )
serve the streamlit app
load_jupyter_server_extension
python
opengeos/streamlit-geospatial
streamlit_call.py
https://github.com/opengeos/streamlit-geospatial/blob/master/streamlit_call.py
MIT
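load_jupyter_server_extension is meant to be registered as a classic Notebook server extension so the Streamlit app is launched alongside Jupyter. One plausible way to enable it, assuming the module is importable as streamlit_call (an assumption, not stated above), is via jupyter_notebook_config.py:

# jupyter_notebook_config.py -- illustrative; the module name is an assumption.
c.NotebookApp.nbserver_extensions = {"streamlit_call": True}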
def app(): st.title("U.S. Real Estate Data and Market Trends") st.markdown( """**Introduction:** This interactive dashboard is designed for visualizing U.S. real estate data and market trends at multiple levels (i.e., national, state, county, and metro). The data sources include [Real Estate Data](https://www.realtor.com/research/data) from realtor.com and [Cartographic Boundary Files](https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html) from U.S. Census Bureau. Several open-source packages are used to process the data and generate the visualizations, e.g., [streamlit](https://streamlit.io), [geopandas](https://geopandas.org), [leafmap](https://leafmap.org), and [pydeck](https://deckgl.readthedocs.io). """ ) with st.expander("See a demo"): st.image("https://i.imgur.com/Z3dk6Tr.gif") row1_col1, row1_col2, row1_col3, row1_col4, row1_col5 = st.columns( [0.6, 0.8, 0.6, 1.4, 2] ) with row1_col1: frequency = st.selectbox("Monthly/weekly data", ["Monthly", "Weekly"]) with row1_col2: types = ["Current month data", "Historical data"] if frequency == "Weekly": types.remove("Current month data") cur_hist = st.selectbox( "Current/historical data", types, ) with row1_col3: if frequency == "Monthly": scale = st.selectbox( "Scale", ["National", "State", "Metro", "County"], index=3 ) else: scale = st.selectbox("Scale", ["National", "Metro"], index=1) gdf = get_geom_data(scale.lower()) if frequency == "Weekly": inventory_df = get_inventory_data(data_links["weekly"][scale.lower()]) weeks = get_weeks(inventory_df) with row1_col1: selected_date = st.date_input("Select a date", value=weeks[-1]) saturday = get_saturday(selected_date) selected_period = saturday.strftime("%-m/%-d/%Y") if saturday not in weeks: st.error( "The selected date is not available in the data. 
Please select a date between {} and {}".format( weeks[0], weeks[-1] ) ) selected_period = weeks[-1].strftime("%-m/%-d/%Y") inventory_df = get_inventory_data(data_links["weekly"][scale.lower()]) inventory_df = filter_weekly_inventory(inventory_df, selected_period) if frequency == "Monthly": if cur_hist == "Current month data": inventory_df = get_inventory_data( data_links["monthly_current"][scale.lower()] ) selected_period = get_periods(inventory_df)[0] else: with row1_col2: inventory_df = get_inventory_data( data_links["monthly_historical"][scale.lower()] ) start_year, end_year = get_start_end_year(inventory_df) periods = get_periods(inventory_df) with st.expander("Select year and month", True): selected_year = st.slider( "Year", start_year, end_year, value=start_year, step=1, ) selected_month = st.slider( "Month", min_value=1, max_value=12, value=int(periods[0][-2:]), step=1, ) selected_period = str(selected_year) + str(selected_month).zfill(2) if selected_period not in periods: st.error("Data not available for selected year and month") selected_period = periods[0] inventory_df = inventory_df[ inventory_df["month_date_yyyymm"] == int(selected_period) ] data_cols = get_data_columns(inventory_df, scale.lower(), frequency.lower()) with row1_col4: selected_col = st.selectbox("Attribute", data_cols) with row1_col5: show_desc = st.checkbox("Show attribute description") if show_desc: try: label, desc = get_data_dict(selected_col.strip()) markdown = f""" **{label}**: {desc} """ st.markdown(markdown) except: st.warning("No description available for selected attribute") row2_col1, row2_col2, row2_col3, row2_col4, row2_col5, row2_col6 = st.columns( [0.6, 0.68, 0.7, 0.7, 1.5, 0.8] ) palettes = cm.list_colormaps() with row2_col1: palette = st.selectbox("Color palette", palettes, index=palettes.index("Blues")) with row2_col2: n_colors = st.slider("Number of colors", min_value=2, max_value=20, value=8) with row2_col3: show_nodata = st.checkbox("Show nodata areas", value=True) with row2_col4: show_3d = st.checkbox("Show 3D view", value=False) with row2_col5: if show_3d: elev_scale = st.slider( "Elevation scale", min_value=1, max_value=1000000, value=1, step=10 ) with row2_col6: st.info("Press Ctrl and move the left mouse button.") else: elev_scale = 1 gdf = join_attributes(gdf, inventory_df, scale.lower()) gdf_null = select_null(gdf, selected_col) gdf = select_non_null(gdf, selected_col) gdf = gdf.sort_values(by=selected_col, ascending=True) colors = cm.get_palette(palette, n_colors) colors = [hex_to_rgb(c) for c in colors] for i, ind in enumerate(gdf.index): index = int(i / (len(gdf) / len(colors))) if index >= len(colors): index = len(colors) - 1 gdf.loc[ind, "R"] = colors[index][0] gdf.loc[ind, "G"] = colors[index][1] gdf.loc[ind, "B"] = colors[index][2] initial_view_state = pdk.ViewState( latitude=40, longitude=-100, zoom=3, max_zoom=16, pitch=0, bearing=0, height=900, width=None, ) min_value = gdf[selected_col].min() max_value = gdf[selected_col].max() color = "color" # color_exp = f"[({selected_col}-{min_value})/({max_value}-{min_value})*255, 0, 0]" color_exp = f"[R, G, B]" geojson = pdk.Layer( "GeoJsonLayer", gdf, pickable=True, opacity=0.5, stroked=True, filled=True, extruded=show_3d, wireframe=True, get_elevation=f"{selected_col}", elevation_scale=elev_scale, # get_fill_color="color", get_fill_color=color_exp, get_line_color=[0, 0, 0], get_line_width=2, line_width_min_pixels=1, ) geojson_null = pdk.Layer( "GeoJsonLayer", gdf_null, pickable=True, opacity=0.2, stroked=True, filled=True, 
extruded=False, wireframe=True, # get_elevation="properties.ALAND/100000", # get_fill_color="color", get_fill_color=[200, 200, 200], get_line_color=[0, 0, 0], get_line_width=2, line_width_min_pixels=1, ) # tooltip = {"text": "Name: {NAME}"} # tooltip_value = f"<b>Value:</b> {median_listing_price}"" tooltip = { "html": "<b>Name:</b> {NAME}<br><b>Value:</b> {" + selected_col + "}<br><b>Date:</b> " + selected_period + "", "style": {"backgroundColor": "steelblue", "color": "white"}, } layers = [geojson] if show_nodata: layers.append(geojson_null) r = pdk.Deck( layers=layers, initial_view_state=initial_view_state, map_style="light", tooltip=tooltip, ) row3_col1, row3_col2 = st.columns([6, 1]) with row3_col1: st.pydeck_chart(r) with row3_col2: st.write( cm.create_colormap( palette, label=selected_col.replace("_", " ").title(), width=0.2, height=3, orientation="vertical", vmin=min_value, vmax=max_value, font_size=10, ) ) row4_col1, row4_col2, row4_col3 = st.columns([1, 2, 3]) with row4_col1: show_data = st.checkbox("Show raw data") with row4_col2: show_cols = st.multiselect("Select columns", data_cols) with row4_col3: show_colormaps = st.checkbox("Preview all color palettes") if show_colormaps: st.write(cm.plot_colormaps(return_fig=True)) if show_data: if scale == "National": st.dataframe(gdf[["NAME", "GEOID"] + show_cols]) elif scale == "State": st.dataframe(gdf[["NAME", "STUSPS"] + show_cols]) elif scale == "County": st.dataframe(gdf[["NAME", "STATEFP", "COUNTYFP"] + show_cols]) elif scale == "Metro": st.dataframe(gdf[["NAME", "CBSAFP"] + show_cols]) elif scale == "Zip": st.dataframe(gdf[["GEOID10"] + show_cols])
**Introduction:** This interactive dashboard is designed for visualizing U.S. real estate data and market trends at multiple levels (i.e., national, state, county, and metro). The data sources include [Real Estate Data](https://www.realtor.com/research/data) from realtor.com and [Cartographic Boundary Files](https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html) from U.S. Census Bureau. Several open-source packages are used to process the data and generate the visualizations, e.g., [streamlit](https://streamlit.io), [geopandas](https://geopandas.org), [leafmap](https://leafmap.org), and [pydeck](https://deckgl.readthedocs.io).
app
python
opengeos/streamlit-geospatial
pages/2_🏠_U.S._Housing.py
https://github.com/opengeos/streamlit-geospatial/blob/master/pages/2_🏠_U.S._Housing.py
MIT
def save_uploaded_file(file_content, file_name):
    """
    Save the uploaded file to a temporary directory
    """
    import tempfile
    import os
    import uuid

    _, file_extension = os.path.splitext(file_name)
    file_id = str(uuid.uuid4())
    file_path = os.path.join(tempfile.gettempdir(), f"{file_id}{file_extension}")

    with open(file_path, "wb") as file:
        file.write(file_content.getbuffer())
        return file_path
Save the uploaded file to a temporary directory
save_uploaded_file
python
opengeos/streamlit-geospatial
pages/9_🔲_Vector_Data_Visualization.py
https://github.com/opengeos/streamlit-geospatial/blob/master/pages/9_🔲_Vector_Data_Visualization.py
MIT
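save_uploaded_file only requires that the uploaded object expose getbuffer(), so it can be exercised without Streamlit. A small sketch using io.BytesIO as a stand-in for the value returned by st.file_uploader, assuming the function above is in scope:

import io

fake_upload = io.BytesIO(b'{"type": "FeatureCollection", "features": []}')
path = save_uploaded_file(fake_upload, "aoi.geojson")
print(path)  # e.g. /tmp/<uuid>.geojson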
def app(): today = date.today() st.title("Create Satellite Timelapse") st.markdown( """ An interactive web app for creating [Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat)/[GOES](https://jstnbraaten.medium.com/goes-in-earth-engine-53fbc8783c16) timelapse for any location around the globe. The app was built using [streamlit](https://streamlit.io), [geemap](https://geemap.org), and [Google Earth Engine](https://earthengine.google.com). For more info, check out my streamlit [blog post](https://blog.streamlit.io/creating-satellite-timelapse-with-streamlit-and-earth-engine). """ ) row1_col1, row1_col2 = st.columns([2, 1]) if st.session_state.get("zoom_level") is None: st.session_state["zoom_level"] = 4 st.session_state["ee_asset_id"] = None st.session_state["bands"] = None st.session_state["palette"] = None st.session_state["vis_params"] = None with row1_col1: ee_authenticate(token_name="EARTHENGINE_TOKEN") m = geemap.Map( basemap="HYBRID", plugin_Draw=True, Draw_export=True, locate_control=True, plugin_LatLngPopup=False, ) m.add_basemap("ROADMAP") with row1_col2: keyword = st.text_input("Search for a location:", "") if keyword: locations = geemap.geocode(keyword) if locations is not None and len(locations) > 0: str_locations = [str(g)[1:-1] for g in locations] location = st.selectbox("Select a location:", str_locations) loc_index = str_locations.index(location) selected_loc = locations[loc_index] lat, lng = selected_loc.lat, selected_loc.lng folium.Marker(location=[lat, lng], popup=location).add_to(m) m.set_center(lng, lat, 12) st.session_state["zoom_level"] = 12 collection = st.selectbox( "Select a satellite image collection: ", [ "Any Earth Engine ImageCollection", "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2 MSI Surface Reflectance", "Geostationary Operational Environmental Satellites (GOES)", "MODIS Vegetation Indices (NDVI/EVI) 16-Day Global 1km", "MODIS Gap filled Land Surface Temperature Daily", "MODIS Ocean Color SMI", "USDA National Agriculture Imagery Program (NAIP)", ], index=1, ) if collection in [ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2 MSI Surface Reflectance", ]: roi_options = ["Uploaded GeoJSON"] + list(landsat_rois.keys()) elif collection == "Geostationary Operational Environmental Satellites (GOES)": roi_options = ["Uploaded GeoJSON"] + list(goes_rois.keys()) elif collection in [ "MODIS Vegetation Indices (NDVI/EVI) 16-Day Global 1km", "MODIS Gap filled Land Surface Temperature Daily", ]: roi_options = ["Uploaded GeoJSON"] + list(modis_rois.keys()) elif collection == "MODIS Ocean Color SMI": roi_options = ["Uploaded GeoJSON"] + list(ocean_rois.keys()) else: roi_options = ["Uploaded GeoJSON"] if collection == "Any Earth Engine ImageCollection": keyword = st.text_input("Enter a keyword to search (e.g., MODIS):", "") if keyword: assets = geemap.search_ee_data(keyword) ee_assets = [] for asset in assets: if asset["ee_id_snippet"].startswith("ee.ImageCollection"): ee_assets.append(asset) asset_titles = [x["title"] for x in ee_assets] dataset = st.selectbox("Select a dataset:", asset_titles) if len(ee_assets) > 0: st.session_state["ee_assets"] = ee_assets st.session_state["asset_titles"] = asset_titles index = asset_titles.index(dataset) ee_id = ee_assets[index]["id"] else: ee_id = "" if dataset is not None: with st.expander("Show dataset details", False): index = asset_titles.index(dataset) html = geemap.ee_data_html(st.session_state["ee_assets"][index]) st.markdown(html, True) # elif collection == "MODIS Gap filled Land 
Surface Temperature Daily": # ee_id = "" else: ee_id = "" asset_id = st.text_input("Enter an ee.ImageCollection asset ID:", ee_id) if asset_id: with st.expander("Customize band combination and color palette", True): try: col = ee.ImageCollection.load(asset_id) st.session_state["ee_asset_id"] = asset_id except: st.error("Invalid Earth Engine asset ID.") st.session_state["ee_asset_id"] = None return img_bands = col.first().bandNames().getInfo() if len(img_bands) >= 3: default_bands = img_bands[:3][::-1] else: default_bands = img_bands[:] bands = st.multiselect( "Select one or three bands (RGB):", img_bands, default_bands ) st.session_state["bands"] = bands if len(bands) == 1: palette_options = st.selectbox( "Color palette", cm.list_colormaps(), index=2, ) palette_values = cm.get_palette(palette_options, 15) palette = st.text_area( "Enter a custom palette:", palette_values, ) st.write( cm.plot_colormap(cmap=palette_options, return_fig=True) ) st.session_state["palette"] = json.loads( palette.replace("'", '"') ) if bands: vis_params = st.text_area( "Enter visualization parameters", "{'bands': [" + ", ".join([f"'{band}'" for band in bands]) + "]}", ) else: vis_params = st.text_area( "Enter visualization parameters", "{}", ) try: st.session_state["vis_params"] = json.loads( vis_params.replace("'", '"') ) st.session_state["vis_params"]["palette"] = st.session_state[ "palette" ] except Exception as e: st.session_state["vis_params"] = None st.error( f"Invalid visualization parameters. It must be a dictionary." ) elif collection == "MODIS Gap filled Land Surface Temperature Daily": with st.expander("Show dataset details", False): st.markdown( """ See the [Awesome GEE Community Datasets](https://samapriya.github.io/awesome-gee-community-datasets/projects/daily_lst/). """ ) MODIS_options = ["Daytime (1:30 pm)", "Nighttime (1:30 am)"] MODIS_option = st.selectbox("Select a MODIS dataset:", MODIS_options) if MODIS_option == "Daytime (1:30 pm)": st.session_state["ee_asset_id"] = ( "projects/sat-io/open-datasets/gap-filled-lst/gf_day_1km" ) else: st.session_state["ee_asset_id"] = ( "projects/sat-io/open-datasets/gap-filled-lst/gf_night_1km" ) palette_options = st.selectbox( "Color palette", cm.list_colormaps(), index=90, ) palette_values = cm.get_palette(palette_options, 15) palette = st.text_area( "Enter a custom palette:", palette_values, ) st.write(cm.plot_colormap(cmap=palette_options, return_fig=True)) st.session_state["palette"] = json.loads(palette.replace("'", '"')) elif collection == "MODIS Ocean Color SMI": with st.expander("Show dataset details", False): st.markdown( """ See the [Earth Engine Data Catalog](https://developers.google.com/earth-engine/datasets/catalog/NASA_OCEANDATA_MODIS-Aqua_L3SMI). 
""" ) MODIS_options = ["Aqua", "Terra"] MODIS_option = st.selectbox("Select a satellite:", MODIS_options) st.session_state["ee_asset_id"] = MODIS_option # if MODIS_option == "Daytime (1:30 pm)": # st.session_state[ # "ee_asset_id" # ] = "projects/sat-io/open-datasets/gap-filled-lst/gf_day_1km" # else: # st.session_state[ # "ee_asset_id" # ] = "projects/sat-io/open-datasets/gap-filled-lst/gf_night_1km" band_dict = { "Chlorophyll a concentration": "chlor_a", "Normalized fluorescence line height": "nflh", "Particulate organic carbon": "poc", "Sea surface temperature": "sst", "Remote sensing reflectance at band 412nm": "Rrs_412", "Remote sensing reflectance at band 443nm": "Rrs_443", "Remote sensing reflectance at band 469nm": "Rrs_469", "Remote sensing reflectance at band 488nm": "Rrs_488", "Remote sensing reflectance at band 531nm": "Rrs_531", "Remote sensing reflectance at band 547nm": "Rrs_547", "Remote sensing reflectance at band 555nm": "Rrs_555", "Remote sensing reflectance at band 645nm": "Rrs_645", "Remote sensing reflectance at band 667nm": "Rrs_667", "Remote sensing reflectance at band 678nm": "Rrs_678", } band_options = list(band_dict.keys()) band = st.selectbox( "Select a band", band_options, band_options.index("Sea surface temperature"), ) st.session_state["band"] = band_dict[band] colors = cm.list_colormaps() palette_options = st.selectbox( "Color palette", colors, index=colors.index("coolwarm"), ) palette_values = cm.get_palette(palette_options, 15) palette = st.text_area( "Enter a custom palette:", palette_values, ) st.write(cm.plot_colormap(cmap=palette_options, return_fig=True)) st.session_state["palette"] = json.loads(palette.replace("'", '"')) sample_roi = st.selectbox( "Select a sample ROI or upload a GeoJSON file:", roi_options, index=0, ) add_outline = st.checkbox( "Overlay an administrative boundary on timelapse", False ) if add_outline: with st.expander("Customize administrative boundary", True): overlay_options = { "User-defined": None, "Continents": "continents", "Countries": "countries", "US States": "us_states", "China": "china", } overlay = st.selectbox( "Select an administrative boundary:", list(overlay_options.keys()), index=2, ) overlay_data = overlay_options[overlay] if overlay_data is None: overlay_data = st.text_input( "Enter an HTTP URL to a GeoJSON file or an ee.FeatureCollection asset id:", "https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/countries.geojson", ) overlay_color = st.color_picker( "Select a color for the administrative boundary:", "#000000" ) overlay_width = st.slider( "Select a line width for the administrative boundary:", 1, 20, 1 ) overlay_opacity = st.slider( "Select an opacity for the administrative boundary:", 0.0, 1.0, 1.0, 0.05, ) else: overlay_data = None overlay_color = "black" overlay_width = 1 overlay_opacity = 1 with row1_col1: with st.expander( "Steps: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Expand this tab to see a demo 👉" ): video_empty = st.empty() data = st.file_uploader( "Upload a GeoJSON file to use as an ROI. 
Customize timelapse parameters and then click the Submit button 😇👇", type=["geojson", "kml", "zip"], ) crs = "epsg:4326" if sample_roi == "Uploaded GeoJSON": if data is None: # st.info( # "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click Submit button" # ) if collection in [ "Geostationary Operational Environmental Satellites (GOES)", "USDA National Agriculture Imagery Program (NAIP)", ] and (not keyword): m.set_center(-100, 40, 3) # else: # m.set_center(4.20, 18.63, zoom=2) else: if collection in [ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2 MSI Surface Reflectance", ]: gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[landsat_rois[sample_roi]] ) elif ( collection == "Geostationary Operational Environmental Satellites (GOES)" ): gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[goes_rois[sample_roi]["region"]] ) elif collection == "MODIS Vegetation Indices (NDVI/EVI) 16-Day Global 1km": gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[modis_rois[sample_roi]] ) if sample_roi != "Uploaded GeoJSON": if collection in [ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2 MSI Surface Reflectance", ]: gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[landsat_rois[sample_roi]] ) elif ( collection == "Geostationary Operational Environmental Satellites (GOES)" ): gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[goes_rois[sample_roi]["region"]] ) elif collection in [ "MODIS Vegetation Indices (NDVI/EVI) 16-Day Global 1km", "MODIS Gap filled Land Surface Temperature Daily", ]: gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[modis_rois[sample_roi]] ) elif collection == "MODIS Ocean Color SMI": gdf = gpd.GeoDataFrame( index=[0], crs=crs, geometry=[ocean_rois[sample_roi]] ) try: st.session_state["roi"] = geemap.gdf_to_ee(gdf, geodesic=False) except Exception as e: st.error(e) st.error("Please draw another ROI and try again.") return m.add_gdf(gdf, "ROI") elif data: gdf = uploaded_file_to_gdf(data) try: st.session_state["roi"] = geemap.gdf_to_ee(gdf, geodesic=False) m.add_gdf(gdf, "ROI") except Exception as e: st.error(e) st.error("Please draw another ROI and try again.") return m.to_streamlit(height=600) with row1_col2: if collection in [ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2 MSI Surface Reflectance", ]: if collection == "Landsat TM-ETM-OLI Surface Reflectance": sensor_start_year = 1984 timelapse_title = "Landsat Timelapse" timelapse_speed = 5 elif collection == "Sentinel-2 MSI Surface Reflectance": sensor_start_year = 2015 timelapse_title = "Sentinel-2 Timelapse" timelapse_speed = 5 video_empty.video("https://youtu.be/VVRK_-dEjR4") with st.form("submit_landsat_form"): roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") title = st.text_input( "Enter a title to show on the timelapse: ", timelapse_title ) RGB = st.selectbox( "Select an RGB band combination:", [ "Red/Green/Blue", "NIR/Red/Green", "SWIR2/SWIR1/NIR", "NIR/SWIR1/Red", "SWIR2/NIR/Red", "SWIR2/SWIR1/Red", "SWIR1/NIR/Blue", "NIR/SWIR1/Blue", "SWIR2/NIR/Green", "SWIR1/NIR/Red", "SWIR2/NIR/SWIR1", "SWIR1/NIR/SWIR2", ], index=9, ) frequency = st.selectbox( "Select a temporal frequency:", ["year", "quarter", "month"], index=0, ) with st.expander("Customize timelapse"): speed = st.slider("Frames per second:", 1, 30, timelapse_speed) dimensions = st.slider( "Maximum dimensions (Width*Height) in pixels", 768, 2000, 768 ) progress_bar_color = 
st.color_picker( "Progress bar color:", "#0000ff" ) years = st.slider( "Start and end year:", sensor_start_year, today.year, (sensor_start_year, today.year), ) months = st.slider("Start and end month:", 1, 12, (1, 12)) font_size = st.slider("Font size:", 10, 50, 30) font_color = st.color_picker("Font color:", "#ffffff") apply_fmask = st.checkbox( "Apply fmask (remove clouds, shadows, snow)", True ) font_type = st.selectbox( "Select the font type for the title:", ["arial.ttf", "alibaba.otf"], index=0, ) fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_fire_image = st.empty() empty_video = st.container() submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... Please wait...") start_year = years[0] end_year = years[1] start_date = str(months[0]).zfill(2) + "-01" end_date = str(months[1]).zfill(2) + "-30" bands = RGB.split("/") try: if collection == "Landsat TM-ETM-OLI Surface Reflectance": out_gif = geemap.landsat_timelapse( roi=roi, out_gif=out_gif, start_year=start_year, end_year=end_year, start_date=start_date, end_date=end_date, bands=bands, apply_fmask=apply_fmask, frames_per_second=speed, # dimensions=dimensions, dimensions=768, overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, frequency=frequency, date_format=None, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=True, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, mp4=mp4, fading=fading, ) elif collection == "Sentinel-2 MSI Surface Reflectance": out_gif = geemap.sentinel2_timelapse( roi=roi, out_gif=out_gif, start_year=start_year, end_year=end_year, start_date=start_date, end_date=end_date, bands=bands, apply_fmask=apply_fmask, frames_per_second=speed, dimensions=768, # dimensions=dimensions, overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, frequency=frequency, date_format=None, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=True, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, mp4=mp4, fading=fading, ) except: empty_text.error( "An error occurred while computing the timelapse. Your probably requested too much data. Try reducing the ROI or timespan." ) st.stop() if out_gif is not None and os.path.exists(out_gif): empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) else: empty_text.error( "Something went wrong. You probably requested too much data. Try reducing the ROI or timespan." 
) elif collection == "Geostationary Operational Environmental Satellites (GOES)": video_empty.video("https://youtu.be/16fA2QORG4A") with st.form("submit_goes_form"): roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") satellite = st.selectbox("Select a satellite:", ["GOES-17", "GOES-16"]) earliest_date = datetime.date(2017, 7, 10) latest_date = datetime.date.today() if sample_roi == "Uploaded GeoJSON": roi_start_date = today - datetime.timedelta(days=2) roi_end_date = today - datetime.timedelta(days=1) roi_start_time = datetime.time(14, 00) roi_end_time = datetime.time(1, 00) else: roi_start = goes_rois[sample_roi]["start_time"] roi_end = goes_rois[sample_roi]["end_time"] roi_start_date = datetime.datetime.strptime( roi_start[:10], "%Y-%m-%d" ) roi_end_date = datetime.datetime.strptime(roi_end[:10], "%Y-%m-%d") roi_start_time = datetime.time( int(roi_start[11:13]), int(roi_start[14:16]) ) roi_end_time = datetime.time( int(roi_end[11:13]), int(roi_end[14:16]) ) start_date = st.date_input("Select the start date:", roi_start_date) end_date = st.date_input("Select the end date:", roi_end_date) with st.expander("Customize timelapse"): add_fire = st.checkbox("Add Fire/Hotspot Characterization", False) scan_type = st.selectbox( "Select a scan type:", ["Full Disk", "CONUS", "Mesoscale"] ) start_time = st.time_input( "Select the start time of the start date:", roi_start_time ) end_time = st.time_input( "Select the end time of the end date:", roi_end_time ) start = ( start_date.strftime("%Y-%m-%d") + "T" + start_time.strftime("%H:%M:%S") ) end = ( end_date.strftime("%Y-%m-%d") + "T" + end_time.strftime("%H:%M:%S") ) speed = st.slider("Frames per second:", 1, 30, 5) add_progress_bar = st.checkbox("Add a progress bar", True) progress_bar_color = st.color_picker( "Progress bar color:", "#0000ff" ) font_size = st.slider("Font size:", 10, 50, 20) font_color = st.color_picker("Font color:", "#ffffff") fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_video = st.container() empty_fire_text = st.empty() empty_fire_image = st.empty() submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... 
Please wait...") geemap.goes_timelapse( roi, out_gif, start_date=start, end_date=end, data=satellite, scan=scan_type.replace(" ", "_").lower(), dimensions=768, framesPerSecond=speed, date_format="YYYY-MM-dd HH:mm", xy=("3%", "3%"), text_sequence=None, font_type="arial.ttf", font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, mp4=mp4, fading=fading, ) if out_gif is not None and os.path.exists(out_gif): empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) if add_fire: out_fire_gif = geemap.temp_file_path(".gif") empty_fire_text.text( "Delineating Fire Hotspot... Please wait..." ) geemap.goes_fire_timelapse( out_fire_gif, start_date=start, end_date=end, data=satellite, scan=scan_type.replace(" ", "_").lower(), region=roi, dimensions=768, framesPerSecond=speed, date_format="YYYY-MM-dd HH:mm", xy=("3%", "3%"), text_sequence=None, font_type="arial.ttf", font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, ) if os.path.exists(out_fire_gif): empty_fire_image.image(out_fire_gif) else: empty_text.text( "Something went wrong, either the ROI is too big or there are no data available for the specified date range. Please try a smaller ROI or different date range." ) elif collection == "MODIS Vegetation Indices (NDVI/EVI) 16-Day Global 1km": video_empty.video("https://youtu.be/16fA2QORG4A") satellite = st.selectbox("Select a satellite:", ["Terra", "Aqua"]) band = st.selectbox("Select a band:", ["NDVI", "EVI"]) with st.form("submit_modis_form"): roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") with st.expander("Customize timelapse"): start = st.date_input( "Select a start date:", datetime.date(2000, 2, 8) ) end = st.date_input("Select an end date:", datetime.date.today()) start_date = start.strftime("%Y-%m-%d") end_date = end.strftime("%Y-%m-%d") speed = st.slider("Frames per second:", 1, 30, 5) add_progress_bar = st.checkbox("Add a progress bar", True) progress_bar_color = st.color_picker( "Progress bar color:", "#0000ff" ) font_size = st.slider("Font size:", 10, 50, 20) font_color = st.color_picker("Font color:", "#ffffff") font_type = st.selectbox( "Select the font type for the title:", ["arial.ttf", "alibaba.otf"], index=0, ) fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_video = st.container() submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... 
Please wait...") geemap.modis_ndvi_timelapse( roi, out_gif, satellite, band, start_date, end_date, 768, speed, overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, mp4=mp4, fading=fading, ) geemap.reduce_gif_size(out_gif) empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) elif collection == "Any Earth Engine ImageCollection": with st.form("submit_ts_form"): with st.expander("Customize timelapse"): title = st.text_input( "Enter a title to show on the timelapse: ", "Timelapse" ) start_date = st.date_input( "Select the start date:", datetime.date(2020, 1, 1) ) end_date = st.date_input( "Select the end date:", datetime.date.today() ) frequency = st.selectbox( "Select a temporal frequency:", ["year", "quarter", "month", "day", "hour", "minute", "second"], index=0, ) reducer = st.selectbox( "Select a reducer for aggregating data:", ["median", "mean", "min", "max", "sum", "variance", "stdDev"], index=0, ) data_format = st.selectbox( "Select a date format to show on the timelapse:", [ "YYYY-MM-dd", "YYYY", "YYMM-MM", "YYYY-MM-dd HH:mm", "YYYY-MM-dd HH:mm:ss", "HH:mm", "HH:mm:ss", "w", "M", "d", "D", ], index=0, ) speed = st.slider("Frames per second:", 1, 30, 5) add_progress_bar = st.checkbox("Add a progress bar", True) progress_bar_color = st.color_picker( "Progress bar color:", "#0000ff" ) font_size = st.slider("Font size:", 10, 50, 30) font_color = st.color_picker("Font color:", "#ffffff") font_type = st.selectbox( "Select the font type for the title:", ["arial.ttf", "alibaba.otf"], index=0, ) fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_video = st.container() empty_fire_image = st.empty() roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... Please wait...") try: geemap.create_timelapse( st.session_state.get("ee_asset_id"), start_date=start_date.strftime("%Y-%m-%d"), end_date=end_date.strftime("%Y-%m-%d"), region=roi, frequency=frequency, reducer=reducer, date_format=data_format, out_gif=out_gif, bands=st.session_state.get("bands"), palette=st.session_state.get("palette"), vis_params=st.session_state.get("vis_params"), dimensions=768, frames_per_second=speed, crs="EPSG:3857", overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, mp4=mp4, fading=fading, ) except: empty_text.error( "An error occurred while computing the timelapse. You probably requested too much data. 
Try reducing the ROI or timespan." ) empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) elif collection in [ "MODIS Gap filled Land Surface Temperature Daily", "MODIS Ocean Color SMI", ]: with st.form("submit_ts_form"): with st.expander("Customize timelapse"): title = st.text_input( "Enter a title to show on the timelapse: ", "Surface Temperature", ) start_date = st.date_input( "Select the start date:", datetime.date(2018, 1, 1) ) end_date = st.date_input( "Select the end date:", datetime.date(2020, 12, 31) ) frequency = st.selectbox( "Select a temporal frequency:", ["year", "quarter", "month", "week", "day"], index=2, ) reducer = st.selectbox( "Select a reducer for aggregating data:", ["median", "mean", "min", "max", "sum", "variance", "stdDev"], index=0, ) vis_params = st.text_area( "Enter visualization parameters", "", help="Enter a string in the format of a dictionary, such as '{'min': 23, 'max': 32}'", ) speed = st.slider("Frames per second:", 1, 30, 5) add_progress_bar = st.checkbox("Add a progress bar", True) progress_bar_color = st.color_picker( "Progress bar color:", "#0000ff" ) font_size = st.slider("Font size:", 10, 50, 30) font_color = st.color_picker("Font color:", "#ffffff") font_type = st.selectbox( "Select the font type for the title:", ["arial.ttf", "alibaba.otf"], index=0, ) add_colorbar = st.checkbox("Add a colorbar", True) colorbar_label = st.text_input( "Enter the colorbar label:", "Surface Temperature (°C)" ) fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_video = st.container() roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... 
Please wait...") try: if ( collection == "MODIS Gap filled Land Surface Temperature Daily" ): out_gif = geemap.create_timelapse( st.session_state.get("ee_asset_id"), start_date=start_date.strftime("%Y-%m-%d"), end_date=end_date.strftime("%Y-%m-%d"), region=roi, bands=None, frequency=frequency, reducer=reducer, date_format=None, out_gif=out_gif, palette=st.session_state.get("palette"), vis_params=None, dimensions=768, frames_per_second=speed, crs="EPSG:3857", overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, add_colorbar=add_colorbar, colorbar_label=colorbar_label, loop=0, mp4=mp4, fading=fading, ) elif collection == "MODIS Ocean Color SMI": if vis_params.startswith("{") and vis_params.endswith( "}" ): vis_params = json.loads( vis_params.replace("'", '"') ) else: vis_params = None out_gif = geemap.modis_ocean_color_timelapse( st.session_state.get("ee_asset_id"), start_date=start_date.strftime("%Y-%m-%d"), end_date=end_date.strftime("%Y-%m-%d"), region=roi, bands=st.session_state["band"], frequency=frequency, reducer=reducer, date_format=None, out_gif=out_gif, palette=st.session_state.get("palette"), vis_params=vis_params, dimensions=768, frames_per_second=speed, crs="EPSG:3857", overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, add_colorbar=add_colorbar, colorbar_label=colorbar_label, loop=0, mp4=mp4, fading=fading, ) except: empty_text.error( "Something went wrong. You probably requested too much data. Try reducing the ROI or timespan." ) if out_gif is not None and os.path.exists(out_gif): geemap.reduce_gif_size(out_gif) empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) else: st.error( "Something went wrong. You probably requested too much data. Try reducing the ROI or timespan." 
) elif collection == "USDA National Agriculture Imagery Program (NAIP)": with st.form("submit_naip_form"): with st.expander("Customize timelapse"): title = st.text_input( "Enter a title to show on the timelapse: ", "NAIP Timelapse" ) years = st.slider( "Start and end year:", 2003, today.year, (2003, today.year), ) bands = st.selectbox( "Select a band combination:", ["N/R/G", "R/G/B"], index=0 ) speed = st.slider("Frames per second:", 1, 30, 3) add_progress_bar = st.checkbox("Add a progress bar", True) progress_bar_color = st.color_picker( "Progress bar color:", "#0000ff" ) font_size = st.slider("Font size:", 10, 50, 30) font_color = st.color_picker("Font color:", "#ffffff") font_type = st.selectbox( "Select the font type for the title:", ["arial.ttf", "alibaba.otf"], index=0, ) fading = st.slider( "Fading duration (seconds) for each frame:", 0.0, 3.0, 0.0 ) mp4 = st.checkbox("Save timelapse as MP4", True) empty_text = st.empty() empty_image = st.empty() empty_video = st.container() empty_fire_image = st.empty() roi = None if st.session_state.get("roi") is not None: roi = st.session_state.get("roi") out_gif = geemap.temp_file_path(".gif") submitted = st.form_submit_button("Submit") if submitted: if sample_roi == "Uploaded GeoJSON" and data is None: empty_text.warning( "Steps to create a timelapse: Draw a rectangle on the map -> Export it as a GeoJSON -> Upload it back to the app -> Click the Submit button. Alternatively, you can select a sample ROI from the dropdown list." ) else: empty_text.text("Computing... Please wait...") try: geemap.naip_timelapse( roi, years[0], years[1], out_gif, bands=bands.split("/"), palette=st.session_state.get("palette"), vis_params=None, dimensions=768, frames_per_second=speed, crs="EPSG:3857", overlay_data=overlay_data, overlay_color=overlay_color, overlay_width=overlay_width, overlay_opacity=overlay_opacity, title=title, title_xy=("2%", "90%"), add_text=True, text_xy=("2%", "2%"), text_sequence=None, font_type=font_type, font_size=font_size, font_color=font_color, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=5, loop=0, mp4=mp4, fading=fading, ) except: empty_text.error( "Something went wrong. You either requested too much data or the ROI is outside the U.S." ) if out_gif is not None and os.path.exists(out_gif): empty_text.text( "Right click the GIF to save it to your computer👇" ) empty_image.image(out_gif) out_mp4 = out_gif.replace(".gif", ".mp4") if mp4 and os.path.exists(out_mp4): with empty_video: st.text( "Right click the MP4 to save it to your computer👇" ) st.video(out_gif.replace(".gif", ".mp4")) else: st.error( "Something went wrong. You either requested too much data or the ROI is outside the U.S." )
An interactive web app for creating [Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat)/[GOES](https://jstnbraaten.medium.com/goes-in-earth-engine-53fbc8783c16) timelapse for any location around the globe. The app was built using [streamlit](https://streamlit.io), [geemap](https://geemap.org), and [Google Earth Engine](https://earthengine.google.com). For more info, check out my streamlit [blog post](https://blog.streamlit.io/creating-satellite-timelapse-with-streamlit-and-earth-engine).
app
python
opengeos/streamlit-geospatial
pages/1_📷_Timelapse.py
https://github.com/opengeos/streamlit-geospatial/blob/master/pages/1_📷_Timelapse.py
MIT
def app():
    st.title("Search Basemaps")
    st.markdown(
        """
    This app is a demonstration of searching and loading basemaps from [xyzservices](https://github.com/geopandas/xyzservices) and [Quick Map Services (QMS)](https://github.com/nextgis/quickmapservices). Selecting from 1000+ basemaps with a few clicks.
    """
    )

    with st.expander("See demo"):
        st.image("https://i.imgur.com/0SkUhZh.gif")

    row1_col1, row1_col2 = st.columns([3, 1])
    width = 800
    height = 600
    tiles = None

    with row1_col2:
        checkbox = st.checkbox("Search Quick Map Services (QMS)")
        keyword = st.text_input("Enter a keyword to search and press Enter:")
        empty = st.empty()

        if keyword:
            options = leafmap.search_xyz_services(keyword=keyword)
            if checkbox:
                qms = leafmap.search_qms(keyword=keyword)
                if qms is not None:
                    options = options + qms

            tiles = empty.multiselect("Select XYZ tiles to add to the map:", options)

    with row1_col1:
        m = leafmap.Map()

        if tiles is not None:
            for tile in tiles:
                m.add_xyz_service(tile)

        m.to_streamlit(height=height)
This app is a demonstration of searching and loading basemaps from [xyzservices](https://github.com/geopandas/xyzservices) and [Quick Map Services (QMS)](https://github.com/nextgis/quickmapservices). Selecting from 1000+ basemaps with a few clicks.
app
python
opengeos/streamlit-geospatial
pages/6_🗺️_Basemaps.py
https://github.com/opengeos/streamlit-geospatial/blob/master/pages/6_🗺️_Basemaps.py
MIT
def app():
    st.title("Web Map Service (WMS)")
    st.markdown(
        """
    This app is a demonstration of loading Web Map Service (WMS) layers. Simply enter the URL of the WMS service in the text box below and press Enter to retrieve the layers. Go to https://apps.nationalmap.gov/services to find some WMS URLs if needed.
    """
    )

    row1_col1, row1_col2 = st.columns([3, 1.3])
    width = 800
    height = 600
    layers = None

    with row1_col2:
        esa_landcover = "https://services.terrascope.be/wms/v2"
        url = st.text_input(
            "Enter a WMS URL:", value="https://services.terrascope.be/wms/v2"
        )
        empty = st.empty()

        if url:
            if is_trusted_url(url):
                options = get_layers(url)
                # Process options as needed
            else:
                st.error(
                    "The entered URL is not trusted. Please enter a valid WMS URL."
                )

            default = None
            if url == esa_landcover:
                default = "WORLDCOVER_2020_MAP"

            layers = empty.multiselect(
                "Select WMS layers to add to the map:", options, default=default
            )

            add_legend = st.checkbox("Add a legend to the map", value=True)
            if default == "WORLDCOVER_2020_MAP":
                legend = str(leafmap.builtin_legends["ESA_WorldCover"])
            else:
                legend = ""
            if add_legend:
                legend_text = st.text_area(
                    "Enter a legend as a dictionary {label: color}",
                    value=legend,
                    height=200,
                )

    with row1_col1:
        m = leafmap.Map(center=(36.3, 0), zoom=2)

        if layers is not None:
            for layer in layers:
                m.add_wms_layer(
                    url, layers=layer, name=layer, attribution=" ", transparent=True
                )
            if add_legend and legend_text:
                legend_dict = json.loads(legend_text.replace("'", '"'))
                m.add_legend(legend_dict=legend_dict)

        m.to_streamlit(height=height)
This app is a demonstration of loading Web Map Service (WMS) layers. Simply enter the URL of the WMS service in the text box below and press Enter to retrieve the layers. Go to https://apps.nationalmap.gov/services to find some WMS URLs if needed.
app
python
opengeos/streamlit-geospatial
pages/7_📦_Web_Map_Service.py
https://github.com/opengeos/streamlit-geospatial/blob/master/pages/7_📦_Web_Map_Service.py
MIT
def parse_cmdline_kwargs(args):
    '''
    convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
    '''
    def parse(v):
        assert isinstance(v, str)
        try:
            return eval(v)
        except (NameError, SyntaxError):
            return v

    return {k: parse(v) for k, v in parse_unknown_args(args).items()}
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
parse_cmdline_kwargs
python
openai/baselines
baselines/run.py
https://github.com/openai/baselines/blob/master/baselines/run.py
MIT
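The helper tries eval() first and falls back to the raw string, which is what turns numbers and lists into Python objects while leaving plain identifiers alone. The fallback in isolation (parse_unknown_args itself lives elsewhere in baselines and is not reproduced here):

def parse(v):
    assert isinstance(v, str)
    try:
        return eval(v)
    except (NameError, SyntaxError):
        return v

print(parse("0.001"))      # 0.001 (float)
print(parse("[32, 32]"))   # [32, 32] (list)
print(parse("Hopper-v2"))  # 'Hopper-v2' stays a string (NameError inside eval)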
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)
Log a value of some diagnostic Call this once for each diagnostic quantity, each iteration If called many times, last value will be used.
logkv
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)
The same as logkv(), but if called many times, values averaged.
logkv_mean
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)
Log a dictionary of key-value pairs
logkvs
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration
    """
    return get_current().dumpkvs()
Write all of the diagnostics from the current iteration
dumpkvs
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console
    and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
log
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
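The logger helpers in the records above (logkv, logkv_mean, logkvs, dumpkvs, log) are normally combined inside a training loop. A minimal sketch, assuming baselines is installed; the directory and key names are illustrative only.

from baselines import logger

logger.configure(dir='/tmp/logger-demo', format_strs=['stdout', 'csv'])
for step in range(3):
    logger.logkv('step', step)          # last value per iteration is kept
    logger.logkv_mean('reward', step)   # repeated calls are averaged
    logger.logkvs({'lr': 7e-4})         # log several pairs at once
    logger.dumpkvs()                    # flush this iteration's diagnostics
logger.log('done', level=logger.INFO)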
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)
Set logging threshold on current logger.
set_level
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()
Get directory that log files are being written to. will be None if there is no output directory (i.e., if you didn't call start)
get_dir
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def profile(n):
    """
    Usage:
    @profile("my_func")
    def my_func(): code
    """
    def decorator_with_name(func):
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
Usage: @profile("my_func") def my_func(): code
profile
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
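A hedged sketch of using the decorator from the docstring above; the function name is hypothetical, and the accumulated timing is reported by profile_kv alongside the other diagnostics (typically under a 'wait_'-prefixed key) on the next dumpkvs().

from baselines import logger

@logger.profile("heavy_step")   # hypothetical name; wraps each call in profile_kv
def heavy_step():
    return sum(range(100000))

heavy_step()
logger.dumpkvs()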
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    If comm is provided, average all numerical stats across that comm
    """
    if dir is None:
        dir = os.getenv('OPENAI_LOGDIR')
    if dir is None:
        dir = osp.join(tempfile.gettempdir(),
            datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)

    rank = get_rank_without_mpi_import()
    if rank > 0:
        log_suffix = log_suffix + "-rank%03i" % rank

    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]

    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log('Logging to %s' % dir)
If comm is provided, average all numerical stats across that comm
configure
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*
    """
    import pandas
    import numpy as np
    from glob import glob
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx, tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            data[step - 1, colidx] = value
    return pandas.DataFrame(data, columns=tags)
path : a tensorboard file OR a directory, where we will find all TB files of the form events.*
read_tb
python
openai/baselines
baselines/logger.py
https://github.com/openai/baselines/blob/master/baselines/logger.py
MIT
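A short sketch of pulling a finished run back into pandas with read_tb; the directory is hypothetical and would need to contain events.* files written by the tensorboard output format, with a TF1-style TensorFlow install available.

from baselines.logger import read_tb

df = read_tb('/tmp/logger-demo')   # or a single events.* file
print(sorted(df.columns))          # one column per logged tag
print(df.tail())                   # one row per step; missing steps are NaN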
def learn( network, env, seed=None, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): ''' Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs*nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps//nbatch+1): # Get mini batch of experiences obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time()-tstart # Calculate the fps (frame per second) fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() return model
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the algorithm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01) max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
learn
python
openai/baselines
baselines/a2c/a2c.py
https://github.com/openai/baselines/blob/master/baselines/a2c/a2c.py
MIT
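A hedged sketch of calling this entry point directly rather than through the usual `python -m baselines.run --alg=a2c ...` launcher; it assumes baselines and gym are installed, and make_vec_env with the 'classic_control' env type is just one convenient way to build a VecEnv-compatible environment.

from baselines.a2c import a2c
from baselines.common.cmd_util import make_vec_env

# Four parallel CartPole copies; each update consumes nsteps * num_env transitions.
env = make_vec_env('CartPole-v0', 'classic_control', num_env=4, seed=0)
model = a2c.learn(network='mlp', env=env, total_timesteps=20000, lr=7e-4, lrschedule='linear')
obs = env.reset()
actions = model.step(obs)[0]   # sample actions from the trained step model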
def __init__(self, name="(unnamed)"):
    """Generalized Tensorflow placeholder. The main differences are:
        - possibly uses multiple placeholders internally and returns multiple values
        - can apply light postprocessing to the value feed to placeholder.
    """
    self.name = name
Generalized Tensorflow placeholder. The main differences are: - possibly uses multiple placeholders internally and returns multiple values - can apply light postprocessing to the value feed to placeholder.
__init__
python
openai/baselines
baselines/deepq/utils.py
https://github.com/openai/baselines/blob/master/baselines/deepq/utils.py
MIT
def get(self):
    """Return the tf variable(s) representing the possibly postprocessed value
    of placeholder(s).
    """
    raise NotImplementedError
Return the tf variable(s) representing the possibly postprocessed value of placeholder(s).
get
python
openai/baselines
baselines/deepq/utils.py
https://github.com/openai/baselines/blob/master/baselines/deepq/utils.py
MIT
def make_feed_dict(self, data):
    """Given data input it to the placeholder(s)."""
    raise NotImplementedError
Given data input it to the placeholder(s).
make_feed_dict
python
openai/baselines
baselines/deepq/utils.py
https://github.com/openai/baselines/blob/master/baselines/deepq/utils.py
MIT
def __init__(self, placeholder):
    """Wrapper for regular tensorflow placeholder."""
    super().__init__(placeholder.name)
    self._placeholder = placeholder
Wrapper for regular tensorflow placeholder.
__init__
python
openai/baselines
baselines/deepq/utils.py
https://github.com/openai/baselines/blob/master/baselines/deepq/utils.py
MIT
def __init__(self, observation_space, name=None):
    """Creates an input placeholder tailored to a specific observation space

    Parameters
    ----------
    observation_space:
        observation space of the environment. Should be one of the gym.spaces types
    name: str
        tensorflow name of the underlying placeholder
    """
    inpt, self.processed_inpt = observation_input(observation_space, name=name)
    super().__init__(inpt)
Creates an input placeholder tailored to a specific observation space Parameters ---------- observation_space: observation space of the environment. Should be one of the gym.spaces types name: str tensorflow name of the underlying placeholder
__init__
python
openai/baselines
baselines/deepq/utils.py
https://github.com/openai/baselines/blob/master/baselines/deepq/utils.py
MIT
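A short TF1 graph-mode sketch of the placeholder wrappers documented above (TfInput, PlaceholderTfInput, ObservationInput); it assumes TensorFlow 1.x and gym are available and uses a toy Box space.

import numpy as np
import gym
import tensorflow as tf
from baselines.deepq.utils import ObservationInput

space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
obs_ph = ObservationInput(space, name="observation")
q_input = obs_ph.get()                          # processed tensor to feed into a q network
feed = obs_ph.make_feed_dict(np.zeros((1, 4)))  # maps raw observations onto the placeholder
with tf.Session() as sess:
    print(sess.run(q_input, feed_dict=feed).shape)   # (1, 4)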
def __init__(self, size):
    """Create Replay buffer.

    Parameters
    ----------
    size: int
        Max number of transitions to store in the buffer. When the buffer
        overflows the old memories are dropped.
    """
    self._storage = []
    self._maxsize = size
    self._next_idx = 0
Create Replay buffer. Parameters ---------- size: int Max number of transitions to store in the buffer. When the buffer overflows the old memories are dropped.
__init__
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
def sample(self, batch_size):
    """Sample a batch of experiences.

    Parameters
    ----------
    batch_size: int
        How many transitions to sample.

    Returns
    -------
    obs_batch: np.array
        batch of observations
    act_batch: np.array
        batch of actions executed given obs_batch
    rew_batch: np.array
        rewards received as results of executing act_batch
    next_obs_batch: np.array
        next set of observations seen after executing act_batch
    done_mask: np.array
        done_mask[i] = 1 if executing act_batch[i] resulted in
        the end of an episode and 0 otherwise.
    """
    idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
    return self._encode_sample(idxes)
Sample a batch of experiences. Parameters ---------- batch_size: int How many transitions to sample. Returns ------- obs_batch: np.array batch of observations act_batch: np.array batch of actions executed given obs_batch rew_batch: np.array rewards received as results of executing act_batch next_obs_batch: np.array next set of observations seen after executing act_batch done_mask: np.array done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode and 0 otherwise.
sample
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
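A hedged end-to-end sketch of the uniform replay buffer described by the two records above; the transition contents are toy values.

import numpy as np
from baselines.deepq.replay_buffer import ReplayBuffer

buf = ReplayBuffer(size=100)
for t in range(10):
    obs = np.full(4, t, dtype=np.float32)
    next_obs = np.full(4, t + 1, dtype=np.float32)
    buf.add(obs, t % 2, 1.0, next_obs, float(t == 9))   # (obs_t, action, reward, obs_tp1, done)

obses_t, actions, rewards, obses_tp1, dones = buf.sample(batch_size=4)
print(obses_t.shape, actions.shape)   # (4, 4) (4,)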
def __init__(self, size, alpha):
    """Create Prioritized Replay buffer.

    Parameters
    ----------
    size: int
        Max number of transitions to store in the buffer. When the buffer
        overflows the old memories are dropped.
    alpha: float
        how much prioritization is used
        (0 - no prioritization, 1 - full prioritization)

    See Also
    --------
    ReplayBuffer.__init__
    """
    super(PrioritizedReplayBuffer, self).__init__(size)
    assert alpha >= 0
    self._alpha = alpha

    it_capacity = 1
    while it_capacity < size:
        it_capacity *= 2

    self._it_sum = SumSegmentTree(it_capacity)
    self._it_min = MinSegmentTree(it_capacity)
    self._max_priority = 1.0
Create Prioritized Replay buffer. Parameters ---------- size: int Max number of transitions to store in the buffer. When the buffer overflows the old memories are dropped. alpha: float how much prioritization is used (0 - no prioritization, 1 - full prioritization) See Also -------- ReplayBuffer.__init__
__init__
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
def add(self, *args, **kwargs):
    """See ReplayBuffer.store_effect"""
    idx = self._next_idx
    super().add(*args, **kwargs)
    self._it_sum[idx] = self._max_priority ** self._alpha
    self._it_min[idx] = self._max_priority ** self._alpha
See ReplayBuffer.store_effect
add
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
def sample(self, batch_size, beta):
    """Sample a batch of experiences.

    compared to ReplayBuffer.sample
    it also returns importance weights and idxes
    of sampled experiences.

    Parameters
    ----------
    batch_size: int
        How many transitions to sample.
    beta: float
        To what degree to use importance weights
        (0 - no corrections, 1 - full correction)

    Returns
    -------
    obs_batch: np.array
        batch of observations
    act_batch: np.array
        batch of actions executed given obs_batch
    rew_batch: np.array
        rewards received as results of executing act_batch
    next_obs_batch: np.array
        next set of observations seen after executing act_batch
    done_mask: np.array
        done_mask[i] = 1 if executing act_batch[i] resulted in
        the end of an episode and 0 otherwise.
    weights: np.array
        Array of shape (batch_size,) and dtype np.float32
        denoting importance weight of each sampled transition
    idxes: np.array
        Array of shape (batch_size,) and dtype np.int32
        indexes in buffer of sampled experiences
    """
    assert beta > 0

    idxes = self._sample_proportional(batch_size)

    weights = []
    p_min = self._it_min.min() / self._it_sum.sum()
    max_weight = (p_min * len(self._storage)) ** (-beta)

    for idx in idxes:
        p_sample = self._it_sum[idx] / self._it_sum.sum()
        weight = (p_sample * len(self._storage)) ** (-beta)
        weights.append(weight / max_weight)
    weights = np.array(weights)
    encoded_sample = self._encode_sample(idxes)
    return tuple(list(encoded_sample) + [weights, idxes])
Sample a batch of experiences. Compared to ReplayBuffer.sample it also returns importance weights and idxes of sampled experiences. Parameters ---------- batch_size: int How many transitions to sample. beta: float To what degree to use importance weights (0 - no corrections, 1 - full correction) Returns ------- obs_batch: np.array batch of observations act_batch: np.array batch of actions executed given obs_batch rew_batch: np.array rewards received as results of executing act_batch next_obs_batch: np.array next set of observations seen after executing act_batch done_mask: np.array done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode and 0 otherwise. weights: np.array Array of shape (batch_size,) and dtype np.float32 denoting importance weight of each sampled transition idxes: np.array Array of shape (batch_size,) and dtype np.int32 indexes in buffer of sampled experiences
sample
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
def update_priorities(self, idxes, priorities):
    """Update priorities of sampled transitions.

    sets priority of transition at index idxes[i] in buffer
    to priorities[i].

    Parameters
    ----------
    idxes: [int]
        List of idxes of sampled transitions
    priorities: [float]
        List of updated priorities corresponding to
        transitions at the sampled idxes denoted by
        variable `idxes`.
    """
    assert len(idxes) == len(priorities)
    for idx, priority in zip(idxes, priorities):
        assert priority > 0
        assert 0 <= idx < len(self._storage)
        self._it_sum[idx] = priority ** self._alpha
        self._it_min[idx] = priority ** self._alpha

        self._max_priority = max(self._max_priority, priority)
Update priorities of sampled transitions. sets priority of transition at index idxes[i] in buffer to priorities[i]. Parameters ---------- idxes: [int] List of idxes of sampled transitions priorities: [float] List of updated priorities corresponding to transitions at the sampled idxes denoted by variable `idxes`.
update_priorities
python
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
MIT
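Putting the prioritized-replay records above together: sample with a beta, compute TD errors in the learner, then feed the absolute errors back through update_priorities. A minimal sketch with toy data; the random "TD errors" and the 1e-6 epsilon are illustrative stand-ins.

import numpy as np
from baselines.deepq.replay_buffer import PrioritizedReplayBuffer

buf = PrioritizedReplayBuffer(size=128, alpha=0.6)
for t in range(32):
    buf.add(np.array([t], dtype=np.float32), 0, 1.0, np.array([t + 1], dtype=np.float32), 0.0)

obs, act, rew, obs1, done, weights, idxes = buf.sample(batch_size=8, beta=0.4)
td_errors = np.random.randn(8)                      # stand-in for the learner's TD errors
buf.update_priorities(idxes, np.abs(td_errors) + 1e-6)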
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    )
Get variables inside a scope The scope can be specified as a string Parameters ---------- scope: str or VariableScope scope in which the variables reside. trainable_only: bool whether or not to return only the variables that were marked as trainable. Returns ------- vars: [tf.Variable] list of variables in `scope`.
scope_vars
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
def scope_name():
    """Returns the name of current scope as a string, e.g. deepq/q_func"""
    return tf.get_variable_scope().name
Returns the name of current scope as a string, e.g. deepq/q_func
scope_name
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
def absolute_scope_name(relative_scope_name):
    """Appends parent scope name to `relative_scope_name`"""
    return scope_name() + "/" + relative_scope_name
Appends parent scope name to `relative_scope_name`
absolute_scope_name
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
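A TF1 graph-mode sketch of the three scope helpers above working together; the variable names are illustrative.

import tensorflow as tf
from baselines.deepq.build_graph import scope_vars, absolute_scope_name

with tf.variable_scope("deepq"):
    with tf.variable_scope("q_func"):
        w = tf.get_variable("w", shape=(4, 2))
    # inside "deepq", the relative scope "q_func" expands to "deepq/q_func"
    q_vars = scope_vars(absolute_scope_name("q_func"), trainable_only=True)
print([v.name for v in q_vars])   # ['deepq/q_func/w:0']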
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None): """Creates the act function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ with tf.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(ob, stochastic=True, update_eps=-1): return _act(ob, stochastic, update_eps) return act
Creates the act function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details.
build_act
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None): """Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q. q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way tf.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion. def perturb_vars(original_scope, perturbed_scope): all_vars = scope_vars(absolute_scope_name(original_scope)) all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope)) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. 
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. deterministic_actions = tf.argmax(q_values_perturbed, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1): return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale) return act
Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable function to select and action given observation. ` See the top of the file for details.
build_act_with_param_noise
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None): """Creates the train function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: tf.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. ` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values. """ if param_noise: act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func) else: act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse) with tf.variable_scope(scope, reuse=reuse): # set up placeholders obs_t_input = make_obs_ph("obs_t") act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") obs_tp1_input = make_obs_ph("obs_tp1") done_mask_ph = tf.placeholder(tf.float32, [None], name="done") importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight") # q network evaluation q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func") # target q network evalution q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func") # q scores for actions which we know were selected in the given state. 
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = tf.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = tf.reduce_mean(importance_weights_ph * errors) # compute optimization op (potentially with gradient clipping) if grad_norm_clipping is not None: gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars) for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var) optimize_expr = optimizer.apply_gradients(gradients) else: optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) # Create callable functions train = U.function( inputs=[ obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph ], outputs=td_error, updates=[optimize_expr] ) update_target = U.function([], [], updates=[update_target_expr]) q_values = U.function([obs_t_input], q_t) return act_f, train, update_target, {'q_values': q_values}
Creates the train function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: tf.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. ` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values.
build_train
python
openai/baselines
baselines/deepq/build_graph.py
https://github.com/openai/baselines/blob/master/baselines/deepq/build_graph.py
MIT
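A hedged sketch of the minimal wiring build_train expects, echoing how the deepq learn record later in this list uses it; the two-layer q_func is a stand-in (real runs would use baselines.deepq.models.build_q_func), and TF1 with tf.contrib is assumed.

import gym
import tensorflow as tf
import tensorflow.contrib.layers as layers
from baselines import deepq
from baselines.deepq.utils import ObservationInput

def q_func(inpt, num_actions, scope, reuse=False):
    # tiny stand-in q network: one hidden layer, one linear head per action
    with tf.variable_scope(scope, reuse=reuse):
        h = layers.fully_connected(inpt, num_outputs=64, activation_fn=tf.nn.relu)
        return layers.fully_connected(h, num_outputs=num_actions, activation_fn=None)

env = gym.make("CartPole-v0")
act, train, update_target, debug = deepq.build_train(
    make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
    q_func=q_func,
    num_actions=env.action_space.n,
    optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
    gamma=0.99,
)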
def save_act(self, path=None):
    """Save model to a pickle located at `path`"""
    if path is None:
        path = os.path.join(logger.get_dir(), "model.pkl")

    with tempfile.TemporaryDirectory() as td:
        save_variables(os.path.join(td, "model"))
        arc_name = os.path.join(td, "packed.zip")
        with zipfile.ZipFile(arc_name, 'w') as zipf:
            for root, dirs, files in os.walk(td):
                for fname in files:
                    file_path = os.path.join(root, fname)
                    if file_path != arc_name:
                        zipf.write(file_path, os.path.relpath(file_path, td))
        with open(arc_name, "rb") as f:
            model_data = f.read()
    with open(path, "wb") as f:
        cloudpickle.dump((model_data, self._act_params), f)
Save model to a pickle located at `path`
save_act
python
openai/baselines
baselines/deepq/deepq.py
https://github.com/openai/baselines/blob/master/baselines/deepq/deepq.py
MIT
def load_act(path):
    """Load act function that was returned by learn function.

    Parameters
    ----------
    path: str
        path to the act function pickle

    Returns
    -------
    act: ActWrapper
        function that takes a batch of observations
        and returns actions.
    """
    return ActWrapper.load_act(path)
Load act function that was returned by learn function. Parameters ---------- path: str path to the act function pickle Returns ------- act: ActWrapper function that takes a batch of observations and returns actions.
load_act
python
openai/baselines
baselines/deepq/deepq.py
https://github.com/openai/baselines/blob/master/baselines/deepq/deepq.py
MIT
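A hedged save/load round trip for the two records above; the pickle filename is hypothetical, and the short learn() call is only there to produce an ActWrapper to save.

import gym
from baselines import deepq

env = gym.make("CartPole-v0")
act = deepq.learn(env, network='mlp', total_timesteps=1000)   # returns an ActWrapper
act.save_act("cartpole_act.pkl")         # hypothetical path
act = deepq.load_act("cartpole_act.pkl") # restore the act function later
obs = env.reset()
action = act(obs[None])[0]               # batch of one observation in, one action out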
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_variables(model_file) return act
Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimize for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress. Set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: bool if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every step with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function.
learn
python
openai/baselines
baselines/deepq/deepq.py
https://github.com/openai/baselines/blob/master/baselines/deepq/deepq.py
MIT
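A hedged sketch of a fuller call to the trainer above, with prioritized replay enabled and an early-stopping callback that reads the `t` and `episode_rewards` locals exposed to it; the CartPole reward threshold and hyperparameters are illustrative.

import gym
from baselines import deepq

def stop_when_solved(lcl, _glb):
    # per the docstring above: returning True from the callback stops training
    return lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199

env = gym.make("CartPole-v0")
act = deepq.learn(
    env,
    network='mlp',
    lr=1e-3,
    total_timesteps=100000,
    buffer_size=50000,
    exploration_fraction=0.1,
    exploration_final_eps=0.02,
    prioritized_replay=True,
    callback=stop_when_solved,
)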
def model(inpt, num_actions, scope, reuse=False):
    """This model takes as input an observation and returns values of all actions."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
This model takes as input an observation and returns values of all actions.
model
python
openai/baselines
baselines/deepq/experiments/custom_cartpole.py
https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/custom_cartpole.py
MIT
def logsigmoid(a):
    '''Equivalent to tf.log(tf.sigmoid(a))'''
    return -tf.nn.softplus(-a)
Equivalent to tf.log(tf.sigmoid(a))
logsigmoid
python
openai/baselines
baselines/gail/adversary.py
https://github.com/openai/baselines/blob/master/baselines/gail/adversary.py
MIT
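A small numerical check of the identity in the docstring above, done in NumPy so it runs without a TF session: log(sigmoid(a)) = -log(1 + exp(-a)) = -softplus(-a).

import numpy as np

a = np.array([-3.0, 0.0, 3.0])
log_sigmoid = np.log(1.0 / (1.0 + np.exp(-a)))   # what tf.log(tf.sigmoid(a)) computes
neg_softplus = -np.log1p(np.exp(-a))             # what -tf.nn.softplus(-a) computes
assert np.allclose(log_sigmoid, neg_softplus)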
def learn(*, network, env, total_timesteps, timesteps_per_batch=1024, # what to train on max_kl=0.001, cg_iters=10, gamma=0.99, lam=1.0, # advantage estimation seed=None, ent_coef=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters =3, max_episodes=0, max_iters=0, # time constraint callback=None, load_path=None, **network_kwargs ): ''' learn a policy function with TRPO algorithm Parameters: ---------- network neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types) or function that takes input placeholder and returns tuple (output, None) for feedforward nets or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets env environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class timesteps_per_batch timesteps per gradient estimation batch max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) ) ent_coef coefficient of policy entropy term in the optimization objective cg_iters number of iterations of conjugate gradient algorithm cg_damping conjugate gradient damping vf_stepsize learning rate for adam optimizer used to optimie value function loss vf_iters number of iterations of value function optimization iterations per each policy optimization step total_timesteps max number of timesteps max_episodes max number of episodes max_iters maximum number of policy optimization iterations callback function to be called with (locals(), globals()) each policy optimization step load_path str, path to load the model from (default: None, i.e. no model is loaded) **network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network Returns: ------- learnt model ''' if MPI is not None: nworkers = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() else: nworkers = 1 rank = 0 cpus_per_worker = 1 U.get_session(config=tf.ConfigProto( allow_soft_placement=True, inter_op_parallelism_threads=cpus_per_worker, intra_op_parallelism_threads=cpus_per_worker )) policy = build_policy(env, network, value_network='copy', **network_kwargs) set_global_seeds(seed) np.set_printoptions(precision=3) # Setup losses and stuff # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space ob = observation_placeholder(ob_space) with tf.variable_scope("pi"): pi = policy(observ_placeholder=ob) with tf.variable_scope("oldpi"): oldpi = policy(observ_placeholder=ob) atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable) ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return ac = pi.pdtype.sample_placeholder([None]) kloldnew = oldpi.pd.kl(pi.pd) ent = pi.pd.entropy() meankl = tf.reduce_mean(kloldnew) meanent = tf.reduce_mean(ent) entbonus = ent_coef * meanent vferr = tf.reduce_mean(tf.square(pi.vf - ret)) ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold surrgain = tf.reduce_mean(ratio * atarg) optimgain = surrgain + entbonus losses = [optimgain, meankl, entbonus, surrgain, meanent] loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"] dist = meankl all_var_list = get_trainable_variables("pi") # var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")] # vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")] var_list = get_pi_trainable_variables("pi") vf_var_list = get_vf_trainable_variables("pi") vfadam = MpiAdam(vf_var_list) 
get_flat = U.GetFlat(var_list) set_from_flat = U.SetFromFlat(var_list) klgrads = tf.gradients(dist, var_list) flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan") shapes = [var.get_shape().as_list() for var in var_list] start = 0 tangents = [] for shape in shapes: sz = U.intprod(shape) tangents.append(tf.reshape(flat_tangent[start:start+sz], shape)) start += sz gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) #pylint: disable=E1111 fvp = U.flatgrad(gvp, var_list) assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(get_variables("oldpi"), get_variables("pi"))]) compute_losses = U.function([ob, ac, atarg], losses) compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)]) compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp) compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list)) @contextmanager def timed(msg): if rank == 0: print(colorize(msg, color='magenta')) tstart = time.time() yield print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta')) else: yield def allmean(x): assert isinstance(x, np.ndarray) if MPI is not None: out = np.empty_like(x) MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM) out /= nworkers else: out = np.copy(x) return out U.initialize() if load_path is not None: pi.load(load_path) th_init = get_flat() if MPI is not None: MPI.COMM_WORLD.Bcast(th_init, root=0) set_from_flat(th_init) vfadam.sync() print("Init param sum", th_init.sum(), flush=True) # Prepare for rollouts # ---------------------------------------- seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True) episodes_so_far = 0 timesteps_so_far = 0 iters_so_far = 0 tstart = time.time() lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards if sum([max_iters>0, total_timesteps>0, max_episodes>0])==0: # noththing to be done return pi assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \ 'out of max_iters, total_timesteps, and max_episodes only one should be specified' while True: if callback: callback(locals(), globals()) if total_timesteps and timesteps_so_far >= total_timesteps: break elif max_episodes and episodes_so_far >= max_episodes: break elif max_iters and iters_so_far >= max_iters: break logger.log("********** Iteration %i ************"%iters_so_far) with timed("sampling"): seg = seg_gen.__next__() add_vtarg_and_adv(seg, gamma, lam) # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets)) ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"] vpredbefore = seg["vpred"] # predicted value function before udpate atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret) if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy args = seg["ob"], seg["ac"], atarg fvpargs = [arr[::5] for arr in args] def fisher_vector_product(p): return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p assign_old_eq_new() # set old parameter values to new parameter values with timed("computegrad"): *lossbefore, g = compute_lossandgrad(*args) lossbefore = allmean(np.array(lossbefore)) g = allmean(g) if np.allclose(g, 0): logger.log("Got zero gradient. 
not updating") else: with timed("cg"): stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==0) assert np.isfinite(stepdir).all() shs = .5*stepdir.dot(fisher_vector_product(stepdir)) lm = np.sqrt(shs / max_kl) # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g)) fullstep = stepdir / lm expectedimprove = g.dot(fullstep) surrbefore = lossbefore[0] stepsize = 1.0 thbefore = get_flat() for _ in range(10): thnew = thbefore + fullstep * stepsize set_from_flat(thnew) meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args))) improve = surr - surrbefore logger.log("Expected: %.3f Actual: %.3f"%(expectedimprove, improve)) if not np.isfinite(meanlosses).all(): logger.log("Got non-finite value of losses -- bad!") elif kl > max_kl * 1.5: logger.log("violated KL constraint. shrinking step.") elif improve < 0: logger.log("surrogate didn't improve. shrinking step.") else: logger.log("Stepsize OK!") break stepsize *= .5 else: logger.log("couldn't compute a good step") set_from_flat(thbefore) if nworkers > 1 and iters_so_far % 20 == 0: paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:]) for (lossname, lossval) in zip(loss_names, meanlosses): logger.record_tabular(lossname, lossval) with timed("vf"): for _ in range(vf_iters): for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]), include_final_partial_batch=False, batch_size=64): g = allmean(compute_vflossandgrad(mbob, mbret)) vfadam.update(g, vf_stepsize) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values if MPI is not None: listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples else: listoflrpairs = [lrlocal] lens, rews = map(flatten_lists, zip(*listoflrpairs)) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if rank==0: logger.dump_tabular() return pi
learn a policy function with TRPO algorithm

Parameters:
----------

network                 neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
                        or function that takes input placeholder and returns tuple (output, None) for feedforward nets
                        or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets

env                     environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class)

timesteps_per_batch     timesteps per gradient estimation batch

max_kl                  max KL divergence between old policy and new policy ( KL(pi_old || pi) )

ent_coef                coefficient of policy entropy term in the optimization objective

cg_iters                number of iterations of conjugate gradient algorithm

cg_damping              conjugate gradient damping

vf_stepsize             learning rate for adam optimizer used to optimize value function loss

vf_iters                number of iterations of value function optimization iterations per each policy optimization step

total_timesteps         max number of timesteps

max_episodes            max number of episodes

max_iters               maximum number of policy optimization iterations

callback                function to be called with (locals(), globals()) each policy optimization step

load_path               str, path to load the model from (default: None, i.e. no model is loaded)

**network_kwargs        keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy
                        and arguments to a particular type of network

Returns:
-------

learnt model
learn
python
openai/baselines
baselines/trpo_mpi/trpo_mpi.py
https://github.com/openai/baselines/blob/master/baselines/trpo_mpi/trpo_mpi.py
MIT
def get_task(benchmark, env_id):
    """Get a task by env_id. Return None if the benchmark doesn't have the env"""
    return next(filter(lambda task: task['env_id'] == env_id, benchmark['tasks']), None)
Get a task by env_id. Return None if the benchmark doesn't have the env
get_task
python
openai/baselines
baselines/bench/benchmarks.py
https://github.com/openai/baselines/blob/master/baselines/bench/benchmarks.py
MIT
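A small usage sketch for get_task. The benchmark dict below is hypothetical and exists only for illustration (real benchmarks are registered inside baselines/bench/benchmarks.py); the import path follows the record above.

from baselines.bench.benchmarks import get_task

benchmark = {  # hypothetical benchmark, for illustration only
    'name': 'ExampleBenchmark',
    'tasks': [{'env_id': 'Pong-v4', 'num_timesteps': int(1e6)},
              {'env_id': 'Breakout-v4', 'num_timesteps': int(1e6)}],
}
print(get_task(benchmark, 'Pong-v4'))     # {'env_id': 'Pong-v4', 'num_timesteps': 1000000}
print(get_task(benchmark, 'MissingEnv'))  # None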
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs): ''' Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0) # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.ppo2.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) if init_fn is not None: init_fn() # Start total timer tfirststart = time.perf_counter() nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...') # Get minibatch obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 if update % log_interval == 0 and is_mpi_root: logger.info('Done.') epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. 
mblossvals = [] if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array inds = np.arange(nbatch) for _ in range(noptepochs): # Randomize the indexes np.random.shuffle(inds) # 0 to batch_size with batch_train_size step for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update_fn is not None: update_fn(update) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, returns) logger.logkv("misc/serial_timesteps", update*nsteps) logger.logkv("misc/nupdates", update) logger.logkv("misc/total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("misc/explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('misc/time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv('loss/' + lossname, lossval) logger.dumpkvs() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root: checkdir = osp.join(logger.get_dir(), 'checkpoints') os.makedirs(checkdir, exist_ok=True) savepath = osp.join(checkdir, '%.5i'%update) print('Saving to', savepath) model.save(savepath) return model
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
learn
python
openai/baselines
baselines/ppo2/ppo2.py
https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py
MIT
def sf01(arr):
    """
    swap and then flatten axes 0 and 1
    """
    s = arr.shape
    return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
swap and then flatten axes 0 and 1
sf01
python
openai/baselines
baselines/ppo2/runner.py
https://github.com/openai/baselines/blob/master/baselines/ppo2/runner.py
MIT
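A quick shape check for sf01 (import path taken from the record above); the PPO2 runner presumably uses it to merge the time and environment axes of rollout arrays into one batch axis.

import numpy as np
from baselines.ppo2.runner import sf01

batch = np.arange(3 * 2 * 4).reshape(3, 2, 4)   # (nsteps=3, nenvs=2, obs_dim=4)
print(sf01(batch).shape)                        # (6, 4): time and env axes merged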
def discount(x, gamma):
    """
    computes discounted sums along 0th dimension of x.

    inputs
    ------
    x: ndarray
    gamma: float

    outputs
    -------
    y: ndarray with same shape as x, satisfying

        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
            where k = len(x) - t - 1
    """
    assert x.ndim >= 1
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
computes discounted sums along 0th dimension of x. inputs ------ x: ndarray gamma: float outputs ------- y: ndarray with same shape as x, satisfying y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k], where k = len(x) - t - 1
discount
python
openai/baselines
baselines/common/math_util.py
https://github.com/openai/baselines/blob/master/baselines/common/math_util.py
MIT
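A small numeric check of discount against the formula in its docstring (import path per the record above).

import numpy as np
from baselines.common.math_util import discount

rewards = np.array([1.0, 1.0, 1.0])
print(discount(rewards, 0.9))
# [2.71 1.9  1.  ]  ==  [1 + 0.9*1 + 0.81*1,  1 + 0.9*1,  1]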
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary
Computes fraction of variance that ypred explains about y. Returns 1 - Var[y-ypred] / Var[y] interpretation: ev=0 => might as well have predicted zero ev=1 => perfect prediction ev<0 => worse than just predicting zero
explained_variance
python
openai/baselines
baselines/common/math_util.py
https://github.com/openai/baselines/blob/master/baselines/common/math_util.py
MIT
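The three interpretation cases from the explained_variance docstring, checked numerically (import path per the record above).

import numpy as np
from baselines.common.math_util import explained_variance

y = np.array([1.0, 2.0, 3.0, 4.0])
print(explained_variance(y, y))                  # 1.0  -- perfect prediction
print(explained_variance(np.zeros_like(y), y))   # 0.0  -- no better than predicting zero
print(explained_variance(-y, y))                 # -3.0 -- worse than predicting zero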
def discount_with_boundaries(X, New, gamma):
    """
    X: 2d array of floats, time x features
    New: 2d array of bools, indicating when a new episode has started
    """
    Y = np.zeros_like(X)
    T = X.shape[0]
    Y[T-1] = X[T-1]
    for t in range(T-2, -1, -1):
        Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
    return Y
X: 2d array of floats, time x features New: 2d array of bools, indicating when a new episode has started
discount_with_boundaries
python
openai/baselines
baselines/common/math_util.py
https://github.com/openai/baselines/blob/master/baselines/common/math_util.py
MIT
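A sketch showing how discount_with_boundaries cuts the discounted sum at episode starts. The docstring describes 2-d (time x features) input, but a 1-d array illustrates the recursion just as well, since only axis 0 is indexed.

import numpy as np
from baselines.common.math_util import discount_with_boundaries

rewards = np.array([1.0, 1.0, 1.0, 1.0])
new_ep  = np.array([1.0, 0.0, 1.0, 0.0])   # new episodes start at t=0 and t=2
print(discount_with_boundaries(rewards, new_ep, 0.9))
# [1.9 1.  1.9 1. ] -- discounting does not leak across the boundary at t=2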
def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image
    (P,Q) are chosen to be as close as possible, and if N
    is square, then P=Q.

    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3
    """
    img_nhwc = np.asarray(img_nhwc)
    N, h, w, c = img_nhwc.shape
    H = int(np.ceil(np.sqrt(N)))
    W = int(np.ceil(float(N)/H))
    img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
    img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
    return img_Hh_Ww_c
Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3
tile_images
python
openai/baselines
baselines/common/tile_images.py
https://github.com/openai/baselines/blob/master/baselines/common/tile_images.py
MIT
def observation_placeholder(ob_space, batch_size=None, name='Ob'):
    '''
    Create placeholder to feed observations into of the size appropriate to the observation space

    Parameters:
    ----------

    ob_space: gym.Space     observation space

    batch_size: int         size of the batch to be fed into input. Can be left None in most cases.

    name: str               name of the placeholder

    Returns:
    -------

    tensorflow placeholder tensor
    '''

    assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \
        'Can only deal with Discrete and Box observation spaces for now'

    dtype = ob_space.dtype
    if dtype == np.int8:
        dtype = np.uint8

    return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)
Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor
observation_placeholder
python
openai/baselines
baselines/common/input.py
https://github.com/openai/baselines/blob/master/baselines/common/input.py
MIT
def observation_input(ob_space, batch_size=None, name='Ob'):
    '''
    Create placeholder to feed observations into of the size appropriate to the observation space,
    and add input encoder of the appropriate type.
    '''

    placeholder = observation_placeholder(ob_space, batch_size, name)
    return placeholder, encode_observation(ob_space, placeholder)
Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type.
observation_input
python
openai/baselines
baselines/common/input.py
https://github.com/openai/baselines/blob/master/baselines/common/input.py
MIT
def encode_observation(ob_space, placeholder):
    '''
    Encode input in the way that is appropriate to the observation space

    Parameters:
    ----------

    ob_space: gym.Space             observation space

    placeholder: tf.placeholder     observation input placeholder
    '''
    if isinstance(ob_space, Discrete):
        return tf.to_float(tf.one_hot(placeholder, ob_space.n))
    elif isinstance(ob_space, Box):
        return tf.to_float(placeholder)
    elif isinstance(ob_space, MultiDiscrete):
        placeholder = tf.cast(placeholder, tf.int32)
        one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])]
        return tf.concat(one_hots, axis=-1)
    else:
        raise NotImplementedError
Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder
encode_observation
python
openai/baselines
baselines/common/input.py
https://github.com/openai/baselines/blob/master/baselines/common/input.py
MIT
def mpi_fork(n, bind_to_core=False):
    """Re-launches the current script with workers
    Returns "parent" for original parent, "child" for MPI children
    """
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        env = os.environ.copy()
        env.update(
            MKL_NUM_THREADS="1",
            OMP_NUM_THREADS="1",
            IN_MPI="1"
        )
        args = ["mpirun", "-np", str(n)]
        if bind_to_core:
            args += ["-bind-to", "core"]
        args += [sys.executable] + sys.argv
        subprocess.check_call(args, env=env)
        return "parent"
    else:
        return "child"
Re-launches the current script with workers Returns "parent" for original parent, "child" for MPI children
mpi_fork
python
openai/baselines
baselines/common/mpi_fork.py
https://github.com/openai/baselines/blob/master/baselines/common/mpi_fork.py
MIT
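A common calling pattern for mpi_fork (a sketch, not taken from the repository): fork early in the script's entry point, exit in the parent, and let only the MPI children run the training code. The import path follows the record above.

import sys
from baselines.common.mpi_fork import mpi_fork

def main():
    whoami = mpi_fork(4)      # re-launches this script under `mpirun -np 4`
    if whoami == "parent":    # the original process has nothing left to do
        sys.exit(0)
    # ... each of the 4 MPI children runs the actual training loop here ...

if __name__ == '__main__':
    main()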
def __init__(self, capacity, operation, neutral_element):
    """Build a Segment Tree data structure.

    https://en.wikipedia.org/wiki/Segment_tree

    Can be used as regular array, but with two
    important differences:

        a) setting item's value is slightly slower.
           It is O(lg capacity) instead of O(1).
        b) user has access to an efficient ( O(log segment size) )
           `reduce` operation which reduces `operation` over
           a contiguous subsequence of items in the array.

    Parameters
    ----------
    capacity: int
        Total size of the array - must be a power of two.
    operation: lambda obj, obj -> obj
        an operation for combining elements (eg. sum, max)
        must form a mathematical group together with the set of
        possible values for array elements (i.e. be associative)
    neutral_element: obj
        neutral element for the operation above. eg. float('-inf')
        for max and 0 for sum.
    """
    assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
    self._capacity = capacity
    self._value = [neutral_element for _ in range(2 * capacity)]
    self._operation = operation
Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient ( O(log segment size) ) `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Parameters --------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj an operation for combining elements (eg. sum, max) must form a mathematical group together with the set of possible values for array elements (i.e. be associative) neutral_element: obj neutral element for the operation above. eg. float('-inf') for max and 0 for sum.
__init__
python
openai/baselines
baselines/common/segment_tree.py
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
MIT
def reduce(self, start=0, end=None):
    """Returns result of applying `self.operation`
    to a contiguous subsequence of the array.

        self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))

    Parameters
    ----------
    start: int
        beginning of the subsequence
    end: int
        end of the subsequence

    Returns
    -------
    reduced: obj
        result of reducing self.operation over the specified range of array elements.
    """
    if end is None:
        end = self._capacity
    if end < 0:
        end += self._capacity
    end -= 1
    return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation(arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequence Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements.
reduce
python
openai/baselines
baselines/common/segment_tree.py
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
MIT
def sum(self, start=0, end=None):
    """Returns arr[start] + ... + arr[end]"""
    return super(SumSegmentTree, self).reduce(start, end)
Returns arr[start] + ... + arr[end]
sum
python
openai/baselines
baselines/common/segment_tree.py
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
MIT
def find_prefixsum_idx(self, prefixsum):
    """Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum

    if array values are probabilities, this function
    allows sampling indexes according to the discrete
    probability efficiently.

    Parameters
    ----------
    prefixsum: float
        upperbound on the sum of array prefix

    Returns
    -------
    idx: int
        highest index satisfying the prefixsum constraint
    """
    assert 0 <= prefixsum <= self.sum() + 1e-5
    idx = 1
    while idx < self._capacity:        # while non-leaf
        if self._value[2 * idx] > prefixsum:
            idx = 2 * idx
        else:
            prefixsum -= self._value[2 * idx]
            idx = 2 * idx + 1
    return idx - self._capacity
Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum if array values are probabilities, this function allows sampling indexes according to the discrete probability efficiently. Parameters ---------- prefixsum: float upperbound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint
find_prefixsum_idx
python
openai/baselines
baselines/common/segment_tree.py
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
MIT
def min(self, start=0, end=None):
    """Returns min(arr[start], ..., arr[end])"""
    return super(MinSegmentTree, self).reduce(start, end)
Returns min(arr[start], ..., arr[end])
min
python
openai/baselines
baselines/common/segment_tree.py
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
MIT
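A usage sketch for the two segment-tree subclasses (import path per the records above), in the style of proportional sampling for prioritized replay.

from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree

capacity = 8                                    # must be a power of two
sum_tree, min_tree = SumSegmentTree(capacity), MinSegmentTree(capacity)
for i, p in enumerate([0.1, 0.4, 0.2, 0.3]):    # per-item priorities
    sum_tree[i] = p
    min_tree[i] = p

print(sum_tree.sum())                      # 1.0 -- total priority mass
print(min_tree.min())                      # 0.1 -- smallest priority stored
print(sum_tree.find_prefixsum_idx(0.45))   # 1, since 0.1 <= 0.45 < 0.1 + 0.4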
def make_vec_env(env_id, env_type, num_env, seed,
                 wrapper_kwargs=None,
                 env_kwargs=None,
                 start_index=0,
                 reward_scale=1.0,
                 flatten_dict_observations=True,
                 gamestate=None,
                 initializer=None,
                 force_dummy=False):
    """
    Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
    """
    wrapper_kwargs = wrapper_kwargs or {}
    env_kwargs = env_kwargs or {}
    mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
    seed = seed + 10000 * mpi_rank if seed is not None else None
    logger_dir = logger.get_dir()

    def make_thunk(rank, initializer=None):
        return lambda: make_env(
            env_id=env_id,
            env_type=env_type,
            mpi_rank=mpi_rank,
            subrank=rank,
            seed=seed,
            reward_scale=reward_scale,
            gamestate=gamestate,
            flatten_dict_observations=flatten_dict_observations,
            wrapper_kwargs=wrapper_kwargs,
            env_kwargs=env_kwargs,
            logger_dir=logger_dir,
            initializer=initializer
        )

    set_global_seeds(seed)
    if not force_dummy and num_env > 1:
        return SubprocVecEnv([make_thunk(i + start_index, initializer=initializer) for i in range(num_env)])
    else:
        return DummyVecEnv([make_thunk(i + start_index, initializer=None) for i in range(num_env)])
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
make_vec_env
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def make_mujoco_env(env_id, seed, reward_scale=1.0):
    """
    Create a wrapped, monitored gym.Env for MuJoCo.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    myseed = seed + 1000 * rank if seed is not None else None
    set_global_seeds(myseed)
    env = gym.make(env_id)
    logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
    env = Monitor(env, logger_path, allow_early_resets=True)
    env.seed(seed)
    if reward_scale != 1.0:
        from baselines.common.retro_wrappers import RewardScaler
        env = RewardScaler(env, reward_scale)
    return env
Create a wrapped, monitored gym.Env for MuJoCo.
make_mujoco_env
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def make_robotics_env(env_id, seed, rank=0):
    """
    Create a wrapped, monitored gym.Env for MuJoCo.
    """
    set_global_seeds(seed)
    env = gym.make(env_id)
    env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal']))
    env = Monitor(
        env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
        info_keywords=('is_success',))
    env.seed(seed)
    return env
Create a wrapped, monitored gym.Env for MuJoCo.
make_robotics_env
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def arg_parser():
    """
    Create an empty argparse.ArgumentParser.
    """
    import argparse
    return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
Create an empty argparse.ArgumentParser.
arg_parser
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def atari_arg_parser():
    """
    Create an argparse.ArgumentParser for run_atari.py.
    """
    print('Obsolete - use common_arg_parser instead')
    return common_arg_parser()
Create an argparse.ArgumentParser for run_atari.py.
atari_arg_parser
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def common_arg_parser():
    """
    Create an argparse.ArgumentParser for run_mujoco.py.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
    parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
    parser.add_argument('--seed', help='RNG seed', type=int, default=None)
    parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
    parser.add_argument('--num_timesteps', type=float, default=1e6),
    parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
    parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
    parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
    parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
    parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
    parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
    parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
    parser.add_argument('--log_path', help='Directory to save learning curve data.', default=None, type=str)
    parser.add_argument('--play', default=False, action='store_true')
    return parser
Create an argparse.ArgumentParser for run_mujoco.py.
common_arg_parser
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def robotics_arg_parser():
    """
    Create an argparse.ArgumentParser for run_mujoco.py.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
    parser.add_argument('--seed', help='RNG seed', type=int, default=None)
    parser.add_argument('--num-timesteps', type=int, default=int(1e6))
    return parser
Create an argparse.ArgumentParser for run_mujoco.py.
robotics_arg_parser
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
def parse_unknown_args(args):
    """
    Parse arguments not consumed by arg parser into a dictionary
    """
    retval = {}
    preceded_by_key = False
    for arg in args:
        if arg.startswith('--'):
            if '=' in arg:
                key = arg.split('=')[0][2:]
                value = arg.split('=')[1]
                retval[key] = value
            else:
                key = arg[2:]
                preceded_by_key = True
        elif preceded_by_key:
            retval[key] = arg
            preceded_by_key = False
    return retval
Parse arguments not consumed by arg parser into a dictionary
parse_unknown_args
python
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/master/baselines/common/cmd_util.py
MIT
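A sketch of how extra command-line flags flow through to parse_unknown_args (import path per the records above). Note that the values stay strings; converting them is left to the caller.

from baselines.common.cmd_util import common_arg_parser, parse_unknown_args

args, unknown = common_arg_parser().parse_known_args(
    ['--env=PongNoFrameskip-v4', '--alg=ppo2', '--nsteps=128', '--num_hidden', '64'])
print(parse_unknown_args(unknown))   # {'nsteps': '128', 'num_hidden': '64'}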
def __init__(self, env, noop_max=30):
    """Sample initial states by taking random number of no-ops on reset.
    No-op is assumed to be action 0.
    """
    gym.Wrapper.__init__(self, env)
    self.noop_max = noop_max
    self.override_num_noops = None
    self.noop_action = 0
    assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0.
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def reset(self, **kwargs):
    """ Do no-op action for a number of steps in [1, noop_max]."""
    self.env.reset(**kwargs)
    if self.override_num_noops is not None:
        noops = self.override_num_noops
    else:
        noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)  # pylint: disable=E1101
    assert noops > 0
    obs = None
    for _ in range(noops):
        obs, _, done, _ = self.env.step(self.noop_action)
        if done:
            obs = self.env.reset(**kwargs)
    return obs
Do no-op action for a number of steps in [1, noop_max].
reset
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, env):
    """Take action on reset for environments that are fixed until firing."""
    gym.Wrapper.__init__(self, env)
    assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
    assert len(env.unwrapped.get_action_meanings()) >= 3
Take action on reset for environments that are fixed until firing.
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, env):
    """Make end-of-life == end-of-episode, but only reset on true game over.
    Done by DeepMind for the DQN and co. since it helps value estimation.
    """
    gym.Wrapper.__init__(self, env)
    self.lives = 0
    self.was_real_done = True
Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation.
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def reset(self, **kwargs):
    """Reset only when lives are exhausted.
    This way all states are still reachable even though lives are episodic,
    and the learner need not know about any of this behind-the-scenes.
    """
    if self.was_real_done:
        obs = self.env.reset(**kwargs)
    else:
        # no-op step to advance from terminal/lost life state
        obs, _, _, _ = self.env.step(0)
    self.lives = self.env.unwrapped.ale.lives()
    return obs
Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes.
reset
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, env, skip=4):
    """Return only every `skip`-th frame"""
    gym.Wrapper.__init__(self, env)
    # most recent raw observations (for max pooling across time steps)
    self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
    self._skip = skip
Return only every `skip`-th frame
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def step(self, action):
    """Repeat action, sum reward, and max over last observations."""
    total_reward = 0.0
    done = None
    for i in range(self._skip):
        obs, reward, done, info = self.env.step(action)
        if i == self._skip - 2: self._obs_buffer[0] = obs
        if i == self._skip - 1: self._obs_buffer[1] = obs
        total_reward += reward
        if done:
            break
    # Note that the observation on the done=True frame
    # doesn't matter
    max_frame = self._obs_buffer.max(axis=0)

    return max_frame, total_reward, done, info
Repeat action, sum reward, and max over last observations.
step
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def reward(self, reward):
    """Bin reward to {+1, 0, -1} by its sign."""
    return np.sign(reward)
Bin reward to {+1, 0, -1} by its sign.
reward
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
    """
    Warp frames to 84x84 as done in the Nature paper and later work.

    If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
    observation should be warped.
    """
    super().__init__(env)
    self._width = width
    self._height = height
    self._grayscale = grayscale
    self._key = dict_space_key
    if self._grayscale:
        num_colors = 1
    else:
        num_colors = 3

    new_space = gym.spaces.Box(
        low=0,
        high=255,
        shape=(self._height, self._width, num_colors),
        dtype=np.uint8,
    )
    if self._key is None:
        original_space = self.observation_space
        self.observation_space = new_space
    else:
        original_space = self.observation_space.spaces[self._key]
        self.observation_space.spaces[self._key] = new_space
    assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
Warp frames to 84x84 as done in the Nature paper and later work. If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which observation should be warped.
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, env, k):
    """Stack k last frames.

    Returns lazy array, which is much more memory efficient.

    See Also
    --------
    baselines.common.atari_wrappers.LazyFrames
    """
    gym.Wrapper.__init__(self, env)
    self.k = k
    self.frames = deque([], maxlen=k)
    shp = env.observation_space.shape
    self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
Stack k last frames. Returns lazy array, which is much more memory efficient. See Also -------- baselines.common.atari_wrappers.LazyFrames
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def __init__(self, frames):
    """This object ensures that common frames between the observations are only stored once.
    It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
    buffers.

    This object should only be converted to numpy array before being passed to the model.

    You'd not believe how complex the previous solution was."""
    self._frames = frames
    self._out = None
This object ensures that common frames between the observations are only stored once. It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers. This object should only be converted to numpy array before being passed to the model. You'd not believe how complex the previous solution was.
__init__
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
Configure environment for DeepMind-style Atari.
wrap_deepmind
python
openai/baselines
baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
MIT
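A minimal DeepMind-style preprocessing stack assembled from the wrappers above (a sketch; it assumes gym with the Atari ROMs installed, and the import path follows the records above).

import gym
from baselines.common.atari_wrappers import NoopResetEnv, MaxAndSkipEnv, wrap_deepmind

env = gym.make('BreakoutNoFrameskip-v4')
env = NoopResetEnv(env, noop_max=30)     # randomize the start state
env = MaxAndSkipEnv(env, skip=4)         # frame skipping with max-pooling
env = wrap_deepmind(env, frame_stack=True)

print(env.observation_space.shape)       # (84, 84, 4): warped grayscale frames, 4 stacked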
def value(self, t):
    """Value of the schedule at time t"""
    raise NotImplementedError()
Value of the schedule at time t
value
python
openai/baselines
baselines/common/schedules.py
https://github.com/openai/baselines/blob/master/baselines/common/schedules.py
MIT
def __init__(self, value):
    """Value remains constant over time.

    Parameters
    ----------
    value: float
        Constant value of the schedule
    """
    self._v = value
Value remains constant over time. Parameters ---------- value: float Constant value of the schedule
__init__
python
openai/baselines
baselines/common/schedules.py
https://github.com/openai/baselines/blob/master/baselines/common/schedules.py
MIT
def value(self, t):
    """See Schedule.value"""
    return self._v
See Schedule.value
value
python
openai/baselines
baselines/common/schedules.py
https://github.com/openai/baselines/blob/master/baselines/common/schedules.py
MIT
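A trivial usage sketch for the constant schedule above; the class name ConstantSchedule and the module baselines.common.schedules are assumptions based on the record paths above.

from baselines.common.schedules import ConstantSchedule

lr_schedule = ConstantSchedule(3e-4)
for t in (0, 1000, 100000):
    print(lr_schedule.value(t))   # always 3e-4, independent of t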